Dataset columns:
  code              string  (lengths 13 to 1.2M)
  order_type        string  (1 distinct value)
  original_example  dict
  step_ids          list    (lengths 1 to 5)
""" get poly data(face center, face id, etc), select face, create object by face data setPosition for vertex (random) import sys module_path = '/home/shrimo/Desktop/course/git/vfx_dev/maya/general_lesson' if module_path not in sys.path: sys.path.append(module_path) import lesson_v01 reload(lesson_v01) lesson_v01.start() """ import maya.cmds as cmds import maya.api.OpenMaya as om2 import random class Face(): def __init__(self, shape, face_index, vertex, center): self.face_path = '{shape}.f[{index}]'.format( shape=shape, index=face_index) self.vertex = vertex self.face_index = face_index self.face_center = center def get_shapes(): # get selected object # print(cmds.ls()) # print(cmds.ls(selection=True)) return cmds.ls(selection=True, shapes=True, dagObjects=True) def get_faces(shapes): # cmds.select(clear=True) # print(shapes) face_data = [] for shape in shapes: mSel = om2.MSelectionList() mSel.add(shape) mDagPath, mObj = mSel.getComponent(0) geo = om2.MItMeshPolygon(mDagPath, mObj) while not geo.isDone(): center = geo.center() print 'face index: {}'.format(geo.index()) vertices = [] for i in geo.getPoints(om2.MSpace.kWorld): vertices.append((i[0], i[1], i[2])) face_in = Face(shape, geo.index(), vertices, center) face_data.append(face_in) geo.next(0) return face_data def get_vertex(shapes): vertex_data = [] spc = om2.MSpace.kWorld for shape in shapes: mSel = om2.MSelectionList() mSel.add(shape) mDagPath, mObj = mSel.getComponent(0) vtx = om2.MItMeshVertex(mDagPath, mObj) while not vtx.isDone(): vtx_pos = vtx.position(spc) print 'vertex index: {}'.format(vtx.index()), vtx_pos face_in = Face(shape, vtx.index(), vtx_pos, None) vertex_data.append(face_in) vtx.next() return vertex_data def set_pos_vertex(shapes, up_y): spc = om2.MSpace.kWorld for shape in shapes: mSel = om2.MSelectionList() mSel.add(shape) mDagPath, mObj = mSel.getComponent(0) vtx = om2.MItMeshVertex(mDagPath, mObj) while not vtx.isDone(): vtx_pos = vtx.position(spc) print 'vertex:'+str(vtx.index()), vtx_pos.y if vtx.index() & 1: vtx_pos.y += up_y vtx.setPosition(vtx_pos, spc) vtx.next() vtx.updateSurface() def set_random_vertex(shapes, up_y): spc = om2.MSpace.kWorld for shape in shapes: mSel = om2.MSelectionList() mSel.add(shape) mDagPath, mObj = mSel.getComponent(0) vtx = om2.MItMeshVertex(mDagPath, mObj) while not vtx.isDone(): vtx_pos = vtx.position(spc) print 'vertex:'+str(vtx.index()), vtx_pos.y vtx_pos.z += random.uniform(0, up_y) vtx.setPosition(vtx_pos, spc) vtx.next() vtx.updateSurface() def create_boxes(shapes, group_name, shape_name, on_face): if on_face: face_data = get_faces(shapes) else: face_data = get_vertex(shapes) cmds.group(em=True, name=group_name) for face in face_data: # print(face.face_index, face.face_path, face.face_center) if face.face_index & 1: cmds.select(face.face_path, add=True) p_name = shape_name + str(face.face_index) cmds.polyCube(n=p_name) # create polyCube name by p_ + face index cmds.setAttr(p_name+'.scale', 0.3, 0.3, 0.3) if on_face: cmds.setAttr( p_name+'.translate', face.face_center[0], face.face_center[1], face.face_center[2]) else: cmds.setAttr(p_name+'.translate', face.vertex.x, face.vertex.y, face.vertex.z) cmds.select(all=True) cmds.parent(p_name, group_name) # cmds.group(p_name, parent=group_name) cmds.select(all=True) def start(): # shapes = cmds.ls(selection=True, shapes=True, dagObjects=True) # set_pos_vertex(get_shapes(), 1) # set_random_vertex(get_shapes(), 1) create_boxes(get_shapes(), 'boxes', 'v_', 0)
normal
{ "blob_id": "723d8819b5341f1397163533f59c17ba1a74b77d", "index": 1310, "step-1": "\"\"\"\nget poly data(face center, face id, etc), select face, create object by face data\nsetPosition for vertex (random)\n\nimport sys\nmodule_path = '/home/shrimo/Desktop/course/git/vfx_dev/maya/general_lesson'\nif module_path not in sys.path:\n sys.path.append(module_path)\n \n\nimport lesson_v01\nreload(lesson_v01)\nlesson_v01.start()\n\"\"\"\n\n\nimport maya.cmds as cmds\nimport maya.api.OpenMaya as om2\nimport random\n\n\nclass Face():\n def __init__(self, shape, face_index, vertex, center):\n self.face_path = '{shape}.f[{index}]'.format(\n shape=shape,\n index=face_index)\n self.vertex = vertex\n self.face_index = face_index\n self.face_center = center\n\n\ndef get_shapes():\n # get selected object\n # print(cmds.ls())\n # print(cmds.ls(selection=True))\n return cmds.ls(selection=True, shapes=True, dagObjects=True)\n\n\ndef get_faces(shapes):\n # cmds.select(clear=True)\n # print(shapes)\n face_data = []\n for shape in shapes:\n mSel = om2.MSelectionList()\n mSel.add(shape)\n mDagPath, mObj = mSel.getComponent(0)\n geo = om2.MItMeshPolygon(mDagPath, mObj)\n while not geo.isDone():\n center = geo.center()\n print 'face index: {}'.format(geo.index())\n vertices = []\n for i in geo.getPoints(om2.MSpace.kWorld):\n vertices.append((i[0], i[1], i[2]))\n face_in = Face(shape, geo.index(), vertices, center)\n face_data.append(face_in)\n geo.next(0)\n\n return face_data\n\n\ndef get_vertex(shapes):\n vertex_data = []\n spc = om2.MSpace.kWorld\n for shape in shapes:\n mSel = om2.MSelectionList()\n mSel.add(shape)\n mDagPath, mObj = mSel.getComponent(0)\n vtx = om2.MItMeshVertex(mDagPath, mObj)\n while not vtx.isDone():\n vtx_pos = vtx.position(spc)\n print 'vertex index: {}'.format(vtx.index()), vtx_pos\n face_in = Face(shape, vtx.index(), vtx_pos, None)\n vertex_data.append(face_in)\n vtx.next()\n\n return vertex_data\n\n\ndef set_pos_vertex(shapes, up_y):\n spc = om2.MSpace.kWorld\n for shape in shapes:\n mSel = om2.MSelectionList()\n mSel.add(shape)\n mDagPath, mObj = mSel.getComponent(0)\n vtx = om2.MItMeshVertex(mDagPath, mObj)\n while not vtx.isDone():\n vtx_pos = vtx.position(spc)\n print 'vertex:'+str(vtx.index()), vtx_pos.y\n if vtx.index() & 1:\n vtx_pos.y += up_y\n vtx.setPosition(vtx_pos, spc)\n vtx.next()\n\n vtx.updateSurface()\n\n\ndef set_random_vertex(shapes, up_y):\n spc = om2.MSpace.kWorld\n for shape in shapes:\n mSel = om2.MSelectionList()\n mSel.add(shape)\n mDagPath, mObj = mSel.getComponent(0)\n vtx = om2.MItMeshVertex(mDagPath, mObj)\n while not vtx.isDone():\n vtx_pos = vtx.position(spc)\n print 'vertex:'+str(vtx.index()), vtx_pos.y\n vtx_pos.z += random.uniform(0, up_y)\n vtx.setPosition(vtx_pos, spc)\n vtx.next()\n\n vtx.updateSurface()\n\n\ndef create_boxes(shapes, group_name, shape_name, on_face):\n if on_face:\n face_data = get_faces(shapes)\n else:\n face_data = get_vertex(shapes)\n cmds.group(em=True, name=group_name)\n for face in face_data:\n # print(face.face_index, face.face_path, face.face_center)\n if face.face_index & 1:\n cmds.select(face.face_path, add=True)\n p_name = shape_name + str(face.face_index)\n cmds.polyCube(n=p_name) # create polyCube name by p_ + face index\n cmds.setAttr(p_name+'.scale', 0.3, 0.3, 0.3)\n if on_face:\n cmds.setAttr(\n p_name+'.translate', face.face_center[0], face.face_center[1], face.face_center[2])\n else:\n cmds.setAttr(p_name+'.translate', face.vertex.x,\n face.vertex.y, face.vertex.z)\n cmds.select(all=True)\n cmds.parent(p_name, 
group_name) \n # cmds.group(p_name, parent=group_name)\n cmds.select(all=True)\n\n\ndef start():\n # shapes = cmds.ls(selection=True, shapes=True, dagObjects=True)\n # set_pos_vertex(get_shapes(), 1)\n # set_random_vertex(get_shapes(), 1)\n create_boxes(get_shapes(), 'boxes', 'v_', 0)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from .factories import *
normal
{ "blob_id": "c036e6a0a9f06b08ee3eb43655dd833b46fd1e76", "index": 3690, "step-1": "<mask token>\n", "step-2": "from .factories import *\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from DataStructures.BST.util import *


def storeInorder(root, inorder):
    if root is None:
        return

    storeInorder(root.left, inorder)
    inorder.append(root.data)
    storeInorder(root.right, inorder)


def arrayToBST(arr, root):
    # Base Case
    if root is None:
        return

    # First update the left subtree
    arrayToBST(arr, root.left)

    # now update root's data delete the value from array
    root.data = arr[0]
    arr.pop(0)

    # Finally update the right subtree
    arrayToBST(arr, root.right)


def binaryTreeToBST(root):
    if root is None:
        return

    # Create the temp array and store the inorder traveral of tree
    arr = []
    storeInorder(root, arr)
    # Sort the array
    arr.sort()
    # copy array elements back to binary tree
    arrayToBST(arr, root)


if __name__ == '__main__':
    root = Node(10)
    root.left = Node(30)
    root.right = Node(15)
    root.left.left = Node(20)
    root.right.right = Node(5)

    binaryTreeToBST(root)

    inorder(root)
normal
{ "blob_id": "d2af2b25a1ba2db93c977a13fe0273919bc2e6e0", "index": 7768, "step-1": "<mask token>\n\n\ndef storeInorder(root, inorder):\n if root is None:\n return\n storeInorder(root.left, inorder)\n inorder.append(root.data)\n storeInorder(root.right, inorder)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef storeInorder(root, inorder):\n if root is None:\n return\n storeInorder(root.left, inorder)\n inorder.append(root.data)\n storeInorder(root.right, inorder)\n\n\ndef arrayToBST(arr, root):\n if root is None:\n return\n arrayToBST(arr, root.left)\n root.data = arr[0]\n arr.pop(0)\n arrayToBST(arr, root.right)\n\n\ndef binaryTreeToBST(root):\n if root is None:\n return\n arr = []\n storeInorder(root, arr)\n arr.sort()\n arrayToBST(arr, root)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef storeInorder(root, inorder):\n if root is None:\n return\n storeInorder(root.left, inorder)\n inorder.append(root.data)\n storeInorder(root.right, inorder)\n\n\ndef arrayToBST(arr, root):\n if root is None:\n return\n arrayToBST(arr, root.left)\n root.data = arr[0]\n arr.pop(0)\n arrayToBST(arr, root.right)\n\n\ndef binaryTreeToBST(root):\n if root is None:\n return\n arr = []\n storeInorder(root, arr)\n arr.sort()\n arrayToBST(arr, root)\n\n\nif __name__ == '__main__':\n root = Node(10)\n root.left = Node(30)\n root.right = Node(15)\n root.left.left = Node(20)\n root.right.right = Node(5)\n binaryTreeToBST(root)\n inorder(root)\n", "step-4": "from DataStructures.BST.util import *\n\n\ndef storeInorder(root, inorder):\n if root is None:\n return\n storeInorder(root.left, inorder)\n inorder.append(root.data)\n storeInorder(root.right, inorder)\n\n\ndef arrayToBST(arr, root):\n if root is None:\n return\n arrayToBST(arr, root.left)\n root.data = arr[0]\n arr.pop(0)\n arrayToBST(arr, root.right)\n\n\ndef binaryTreeToBST(root):\n if root is None:\n return\n arr = []\n storeInorder(root, arr)\n arr.sort()\n arrayToBST(arr, root)\n\n\nif __name__ == '__main__':\n root = Node(10)\n root.left = Node(30)\n root.right = Node(15)\n root.left.left = Node(20)\n root.right.right = Node(5)\n binaryTreeToBST(root)\n inorder(root)\n", "step-5": "from DataStructures.BST.util import *\n\n\ndef storeInorder(root, inorder):\n if root is None:\n return\n\n storeInorder(root.left, inorder)\n inorder.append(root.data)\n storeInorder(root.right, inorder)\n\n\ndef arrayToBST(arr, root):\n # Base Case\n if root is None:\n return\n\n # First update the left subtree\n arrayToBST(arr, root.left)\n\n # now update root's data delete the value from array\n root.data = arr[0]\n arr.pop(0)\n\n # Finally update the right subtree\n arrayToBST(arr, root.right)\n\n\ndef binaryTreeToBST(root):\n if root is None:\n return\n\n # Create the temp array and store the inorder traveral of tree\n arr = []\n storeInorder(root, arr)\n # Sort the array\n arr.sort()\n # copy array elements back to binary tree\n arrayToBST(arr, root)\n\n\nif __name__ == '__main__':\n root = Node(10)\n root.left = Node(30)\n root.right = Node(15)\n root.left.left = Node(20)\n root.right.right = Node(5)\n\n binaryTreeToBST(root)\n\n inorder(root)\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
#
# Copyright John Reid 2009
#


"""
Code to handle bootstrap analyses.
"""

from itertools import cycle
import random
import bisect


def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):
    """
    Yield samples that match the sizes given in test_set_sizes
    """
    for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):
        yield random.sample(test_universe, sample_size)


def calculate_bootstrap_statistics(samples, statistic):
    "Calculate the bootstrap statistics for the samples."
    stats = list(map(statistic, samples))
    stats.sort()
    return stats


def bootstrap_p_value(bootstrap_stats, stat_value):
    """
    Calculate the p-value for the statistic's value given the bootstrap values.
    """
    return 1. - bisect.bisect_left(bootstrap_stats, stat_value) / float(len(bootstrap_stats))
normal
{ "blob_id": "752affdfa1481b9a19a9b7dfe76f9d5d11c80073", "index": 4678, "step-1": "<mask token>\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n", "step-2": "<mask token>\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(\n test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\n<mask token>\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n", "step-3": "<mask token>\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(\n test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\ndef calculate_bootstrap_statistics(samples, statistic):\n \"\"\"Calculate the bootstrap statistics for the samples.\"\"\"\n stats = list(map(statistic, samples))\n stats.sort()\n return stats\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n", "step-4": "<mask token>\nfrom itertools import cycle\nimport random\nimport bisect\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(\n test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\ndef calculate_bootstrap_statistics(samples, statistic):\n \"\"\"Calculate the bootstrap statistics for the samples.\"\"\"\n stats = list(map(statistic, samples))\n stats.sort()\n return stats\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n", "step-5": "#\n# Copyright John Reid 2009\n#\n\n\n\"\"\"\nCode to handle bootstrap analyses.\n\"\"\"\n\nfrom itertools import cycle\nimport random\nimport bisect\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\ndef calculate_bootstrap_statistics(samples, statistic):\n \"Calculate the bootstrap statistics for the samples.\"\n stats = list(map(statistic, samples))\n stats.sort()\n return stats\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1. - bisect.bisect_left(bootstrap_stats, stat_value) / float(len(bootstrap_stats))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
## adapted from https://matplotlib.org/examples/api/radar_chart.html

import numpy as np

import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection


def radar_factory(num_vars, frame='circle'):
    theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)
    theta += np.pi/2
    def draw_poly_patch(self):
        verts = unit_poly_verts(theta)
        return plt.Polygon(verts, closed=True, edgecolor='k')
    def draw_circle_patch(self):
        return plt.Circle((0.5, 0.5), 0.5)
    patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
    if frame not in patch_dict:
        raise ValueError('unknown value for `frame`: %s' % frame)
    class RadarAxes(PolarAxes):
        name = 'radar'
        RESOLUTION = 1
        draw_patch = patch_dict[frame]
        def fill(self, *args, **kwargs):
            closed = kwargs.pop('closed', True)
            return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
        def plot(self, *args, **kwargs):
            lines = super(RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                self._close_line(line)
        def _close_line(self, line):
            x, y = line.get_data()
            if x[0] != x[-1]:
                x = np.concatenate((x, [x[0]]))
                y = np.concatenate((y, [y[0]]))
                line.set_data(x, y)
        def set_varlabels(self, labels):
            self.set_thetagrids(np.degrees(theta), labels)
        def _gen_axes_patch(self):
            return self.draw_patch()
        def _gen_axes_spines(self):
            if frame == 'circle':
                return PolarAxes._gen_axes_spines(self)
            spine_type = 'circle'
            verts = unit_poly_verts(theta)
            verts.append(verts[0])
            path = Path(verts)
            spine = Spine(self, spine_type, path)
            spine.set_transform(self.transAxes)
            return {'polar': spine}
    register_projection(RadarAxes)
    return theta


def unit_poly_verts(theta):
    x0, y0, r = [0.5] * 3
    verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
    return verts

def labels_to_colors(labels):
    cmap = plt.get_cmap('viridis')
    color_dict = {}
    unique_labels = np.unique(labels)
    for i, v in enumerate(unique_labels):
        color_dict[v] = cmap(i / len(unique_labels))
    colors = [color_dict[l] for l in labels]
    return colors

def radar_chart(data, labels, show_axis=False, fill_polygon=False):
    theta = radar_factory(len(data[0]), frame='circle')
    colors = labels_to_colors(labels)
    fig, ax = plt.subplots(figsize=(5,5), subplot_kw=dict(projection='radar'), facecolor='white')
    ax.axis('on' if show_axis else 'off')
    fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
    for record, color in zip(data, colors):
        ax.plot(theta, record, color=color)
        if fill_polygon:
            ax.fill(theta, record, facecolor=color, alpha=0.25)
    return fig, ax
normal
{ "blob_id": "ddf64ea5ecbd3aa737cd788924035cccb5544fec", "index": 5544, "step-1": "<mask token>\n\n\ndef radar_factory(num_vars, frame='circle'):\n theta = np.linspace(0, 2 * np.pi, num_vars, endpoint=False)\n theta += np.pi / 2\n\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n return plt.Circle((0.5, 0.5), 0.5)\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n\n class RadarAxes(PolarAxes):\n name = 'radar'\n RESOLUTION = 1\n draw_patch = patch_dict[frame]\n\n def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(*args, closed=closed, **kwargs)\n\n def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n self.set_thetagrids(np.degrees(theta), labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n spine_type = 'circle'\n verts = unit_poly_verts(theta)\n verts.append(verts[0])\n path = Path(verts)\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n register_projection(RadarAxes)\n return theta\n\n\n<mask token>\n\n\ndef radar_chart(data, labels, show_axis=False, fill_polygon=False):\n theta = radar_factory(len(data[0]), frame='circle')\n colors = labels_to_colors(labels)\n fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(projection=\n 'radar'), facecolor='white')\n ax.axis('on' if show_axis else 'off')\n fig.subplots_adjust(wspace=0.25, hspace=0.2, top=0.85, bottom=0.05)\n for record, color in zip(data, colors):\n ax.plot(theta, record, color=color)\n if fill_polygon:\n ax.fill(theta, record, facecolor=color, alpha=0.25)\n return fig, ax\n", "step-2": "<mask token>\n\n\ndef radar_factory(num_vars, frame='circle'):\n theta = np.linspace(0, 2 * np.pi, num_vars, endpoint=False)\n theta += np.pi / 2\n\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n return plt.Circle((0.5, 0.5), 0.5)\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n\n class RadarAxes(PolarAxes):\n name = 'radar'\n RESOLUTION = 1\n draw_patch = patch_dict[frame]\n\n def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(*args, closed=closed, **kwargs)\n\n def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n self.set_thetagrids(np.degrees(theta), labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n spine_type = 'circle'\n verts = 
unit_poly_verts(theta)\n verts.append(verts[0])\n path = Path(verts)\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n register_projection(RadarAxes)\n return theta\n\n\ndef unit_poly_verts(theta):\n x0, y0, r = [0.5] * 3\n verts = [(r * np.cos(t) + x0, r * np.sin(t) + y0) for t in theta]\n return verts\n\n\n<mask token>\n\n\ndef radar_chart(data, labels, show_axis=False, fill_polygon=False):\n theta = radar_factory(len(data[0]), frame='circle')\n colors = labels_to_colors(labels)\n fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(projection=\n 'radar'), facecolor='white')\n ax.axis('on' if show_axis else 'off')\n fig.subplots_adjust(wspace=0.25, hspace=0.2, top=0.85, bottom=0.05)\n for record, color in zip(data, colors):\n ax.plot(theta, record, color=color)\n if fill_polygon:\n ax.fill(theta, record, facecolor=color, alpha=0.25)\n return fig, ax\n", "step-3": "<mask token>\n\n\ndef radar_factory(num_vars, frame='circle'):\n theta = np.linspace(0, 2 * np.pi, num_vars, endpoint=False)\n theta += np.pi / 2\n\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n return plt.Circle((0.5, 0.5), 0.5)\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n\n class RadarAxes(PolarAxes):\n name = 'radar'\n RESOLUTION = 1\n draw_patch = patch_dict[frame]\n\n def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(*args, closed=closed, **kwargs)\n\n def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n self.set_thetagrids(np.degrees(theta), labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n spine_type = 'circle'\n verts = unit_poly_verts(theta)\n verts.append(verts[0])\n path = Path(verts)\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n register_projection(RadarAxes)\n return theta\n\n\ndef unit_poly_verts(theta):\n x0, y0, r = [0.5] * 3\n verts = [(r * np.cos(t) + x0, r * np.sin(t) + y0) for t in theta]\n return verts\n\n\ndef labels_to_colors(labels):\n cmap = plt.get_cmap('viridis')\n color_dict = {}\n unique_labels = np.unique(labels)\n for i, v in enumerate(unique_labels):\n color_dict[v] = cmap(i / len(unique_labels))\n colors = [color_dict[l] for l in labels]\n return colors\n\n\ndef radar_chart(data, labels, show_axis=False, fill_polygon=False):\n theta = radar_factory(len(data[0]), frame='circle')\n colors = labels_to_colors(labels)\n fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(projection=\n 'radar'), facecolor='white')\n ax.axis('on' if show_axis else 'off')\n fig.subplots_adjust(wspace=0.25, hspace=0.2, top=0.85, bottom=0.05)\n for record, color in zip(data, colors):\n ax.plot(theta, record, color=color)\n if fill_polygon:\n ax.fill(theta, record, facecolor=color, alpha=0.25)\n return fig, ax\n", "step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nfrom 
matplotlib.spines import Spine\nfrom matplotlib.projections.polar import PolarAxes\nfrom matplotlib.projections import register_projection\n\n\ndef radar_factory(num_vars, frame='circle'):\n theta = np.linspace(0, 2 * np.pi, num_vars, endpoint=False)\n theta += np.pi / 2\n\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n return plt.Circle((0.5, 0.5), 0.5)\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n\n class RadarAxes(PolarAxes):\n name = 'radar'\n RESOLUTION = 1\n draw_patch = patch_dict[frame]\n\n def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(*args, closed=closed, **kwargs)\n\n def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n self.set_thetagrids(np.degrees(theta), labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n spine_type = 'circle'\n verts = unit_poly_verts(theta)\n verts.append(verts[0])\n path = Path(verts)\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n register_projection(RadarAxes)\n return theta\n\n\ndef unit_poly_verts(theta):\n x0, y0, r = [0.5] * 3\n verts = [(r * np.cos(t) + x0, r * np.sin(t) + y0) for t in theta]\n return verts\n\n\ndef labels_to_colors(labels):\n cmap = plt.get_cmap('viridis')\n color_dict = {}\n unique_labels = np.unique(labels)\n for i, v in enumerate(unique_labels):\n color_dict[v] = cmap(i / len(unique_labels))\n colors = [color_dict[l] for l in labels]\n return colors\n\n\ndef radar_chart(data, labels, show_axis=False, fill_polygon=False):\n theta = radar_factory(len(data[0]), frame='circle')\n colors = labels_to_colors(labels)\n fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(projection=\n 'radar'), facecolor='white')\n ax.axis('on' if show_axis else 'off')\n fig.subplots_adjust(wspace=0.25, hspace=0.2, top=0.85, bottom=0.05)\n for record, color in zip(data, colors):\n ax.plot(theta, record, color=color)\n if fill_polygon:\n ax.fill(theta, record, facecolor=color, alpha=0.25)\n return fig, ax\n", "step-5": "## adapted from https://matplotlib.org/examples/api/radar_chart.html\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nfrom matplotlib.spines import Spine\nfrom matplotlib.projections.polar import PolarAxes\nfrom matplotlib.projections import register_projection\n\n\ndef radar_factory(num_vars, frame='circle'):\n theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)\n theta += np.pi/2\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n def draw_circle_patch(self):\n return plt.Circle((0.5, 0.5), 0.5)\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n class RadarAxes(PolarAxes):\n name = 'radar'\n RESOLUTION = 1\n draw_patch = patch_dict[frame]\n def fill(self, *args, 
**kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\n def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n def _close_line(self, line):\n x, y = line.get_data()\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n def set_varlabels(self, labels):\n self.set_thetagrids(np.degrees(theta), labels)\n def _gen_axes_patch(self):\n return self.draw_patch()\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n spine_type = 'circle'\n verts = unit_poly_verts(theta)\n verts.append(verts[0])\n path = Path(verts)\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n register_projection(RadarAxes)\n return theta\n\n\ndef unit_poly_verts(theta):\n x0, y0, r = [0.5] * 3\n verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]\n return verts\n\ndef labels_to_colors(labels):\n cmap = plt.get_cmap('viridis')\n color_dict = {}\n unique_labels = np.unique(labels)\n for i, v in enumerate(unique_labels):\n color_dict[v] = cmap(i / len(unique_labels))\n colors = [color_dict[l] for l in labels]\n return colors\n\ndef radar_chart(data, labels, show_axis=False, fill_polygon=False):\n theta = radar_factory(len(data[0]), frame='circle')\n colors = labels_to_colors(labels)\n fig, ax = plt.subplots(figsize=(5,5), subplot_kw=dict(projection='radar'), facecolor='white')\n ax.axis('on' if show_axis else 'off')\n fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)\n for record, color in zip(data, colors):\n ax.plot(theta, record, color=color)\n if fill_polygon:\n ax.fill(theta, record, facecolor=color, alpha=0.25)\n return fig, ax\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from db_connector import insert_item_details, insert_user_details
from Item_details import ItemDetails


def mechant_service(user_id):
    print('================================')
    print('Merchant Page')
    print('================================')
    heading='=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. logout \n============================================= \n'
    int_value=''
    while int_value!=1 and int_value!=2 and int_value!=3 and int_value!=4 and int_value!=5:
        result=input(heading)
        try:
            int_value=int(result)
            if int_value==1:
                create_item(user_id)
                int_value=''
            elif int_value==2:
                print('view item')
                int_value=''
            elif int_value==3:
                print('View order list')
                int_value=''
            elif int_value==4:
                print('Accept')
                int_value=''
            elif int_value==5:
                print('logout successfully')
                return
        except Exception as e:
            print(e)
            print('\n\ninvalid input')


def create_item(user_id):
    flag=False
    while flag==False:
        product_name=input('Enter the name of the product : ')
        flag=validate_product_name(product_name)

    flag=False
    while flag==False:
        price=input('Enter the price : ')
        flag=validate_product_price(price)

    flag=False
    while flag==False:
        qty=input('Enter the qty : ')
        flag=validate_product_quantity(qty)

    item_detail=ItemDetails(product_name,price,qty,user_id)
    insert_item_details(item_detail)
    print('successfully created')


def validate_product_name(name):
    if name.isalpha():
        if len(name) > 3 and len(name) <=10:
            return True
        else:
            print('name should be more than 3 and less than 10 letters')
            return False
    else:
        print("name should contain only alphanumeric")
        return False


def validate_product_price(price):
    if price.isnumeric():
        return True
    else:
        print("price should contain only numeric")
        return False


def validate_product_quantity(qty):
    if qty.isnumeric():
        return True
    else:
        print("qty should contain only numeric")
        return False
normal
{ "blob_id": "d5dae7ab6eb34c82ae795730ecae666c4f81f10a", "index": 4160, "step-1": "<mask token>\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\n<mask token>\n\n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should contain only numeric')\n return False\n", "step-2": "<mask token>\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading = \"\"\"=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. logout \n============================================= \n\"\"\"\n int_value = ''\n while (int_value != 1 and int_value != 2 and int_value != 3 and \n int_value != 4 and int_value != 5):\n result = input(heading)\n try:\n int_value = int(result)\n if int_value == 1:\n create_item(user_id)\n int_value = ''\n elif int_value == 2:\n print('view item')\n int_value = ''\n elif int_value == 3:\n print('View order list')\n int_value = ''\n elif int_value == 4:\n print('Accept')\n int_value = ''\n elif int_value == 5:\n print('logout successfully')\n return\n except Exception as e:\n print(e)\n print('\\n\\ninvalid input')\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\n<mask token>\n\n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print('price should contain only numeric')\n return False\n\n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should contain only numeric')\n return False\n", "step-3": "<mask token>\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading = \"\"\"=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. 
logout \n============================================= \n\"\"\"\n int_value = ''\n while (int_value != 1 and int_value != 2 and int_value != 3 and \n int_value != 4 and int_value != 5):\n result = input(heading)\n try:\n int_value = int(result)\n if int_value == 1:\n create_item(user_id)\n int_value = ''\n elif int_value == 2:\n print('view item')\n int_value = ''\n elif int_value == 3:\n print('View order list')\n int_value = ''\n elif int_value == 4:\n print('Accept')\n int_value = ''\n elif int_value == 5:\n print('logout successfully')\n return\n except Exception as e:\n print(e)\n print('\\n\\ninvalid input')\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\ndef validate_product_name(name):\n if name.isalpha():\n if len(name) > 3 and len(name) <= 10:\n return True\n else:\n print('name should be more than 3 and less than 10 letters')\n return False\n else:\n print('name should contain only alphanumeric')\n return False\n\n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print('price should contain only numeric')\n return False\n\n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should contain only numeric')\n return False\n", "step-4": "from db_connector import insert_item_details, insert_user_details\nfrom Item_details import ItemDetails\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading = \"\"\"=============================================\nenter your choice:\n1. Create item \n2. View item \n3. View order list \n4. Accept \n5. 
logout \n============================================= \n\"\"\"\n int_value = ''\n while (int_value != 1 and int_value != 2 and int_value != 3 and \n int_value != 4 and int_value != 5):\n result = input(heading)\n try:\n int_value = int(result)\n if int_value == 1:\n create_item(user_id)\n int_value = ''\n elif int_value == 2:\n print('view item')\n int_value = ''\n elif int_value == 3:\n print('View order list')\n int_value = ''\n elif int_value == 4:\n print('Accept')\n int_value = ''\n elif int_value == 5:\n print('logout successfully')\n return\n except Exception as e:\n print(e)\n print('\\n\\ninvalid input')\n\n\ndef create_item(user_id):\n flag = False\n while flag == False:\n product_name = input('Enter the name of the product : ')\n flag = validate_product_name(product_name)\n flag = False\n while flag == False:\n price = input('Enter the price : ')\n flag = validate_product_price(price)\n flag = False\n while flag == False:\n qty = input('Enter the qty : ')\n flag = validate_product_quantity(qty)\n item_detail = ItemDetails(product_name, price, qty, user_id)\n insert_item_details(item_detail)\n print('successfully created')\n\n\ndef validate_product_name(name):\n if name.isalpha():\n if len(name) > 3 and len(name) <= 10:\n return True\n else:\n print('name should be more than 3 and less than 10 letters')\n return False\n else:\n print('name should contain only alphanumeric')\n return False\n\n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print('price should contain only numeric')\n return False\n\n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print('qty should contain only numeric')\n return False\n", "step-5": "from db_connector import insert_item_details, insert_user_details\nfrom Item_details import ItemDetails\n\n\ndef mechant_service(user_id):\n print('================================')\n print('Merchant Page')\n print('================================')\n heading='=============================================\\nenter your choice:\\n1. Create item \\n2. View item \\n3. View order list \\n4. Accept \\n5. 
logout \\n============================================= \\n'\n int_value=''\n while int_value!=1 and int_value!=2 and int_value!=3 and int_value!=4 and int_value!=5:\n result=input(heading)\n try:\n int_value=int(result)\n if int_value==1:\n create_item(user_id)\n int_value=''\n elif int_value==2:\n print('view item')\n int_value=''\n elif int_value==3:\n print('View order list')\n int_value=''\n elif int_value==4:\n print('Accept')\n int_value=''\n elif int_value==5:\n print('logout successfully')\n return \n except Exception as e:\n print(e)\n print('\\n\\ninvalid input') \n\ndef create_item(user_id):\n flag=False\n while flag==False:\n product_name=input('Enter the name of the product : ')\n flag=validate_product_name(product_name)\n \n flag=False\n while flag==False:\n price=input('Enter the price : ')\n flag=validate_product_price(price)\n\n flag=False\n while flag==False:\n qty=input('Enter the qty : ')\n flag=validate_product_quantity(qty) \n\n\n item_detail=ItemDetails(product_name,price,qty,user_id) \n insert_item_details(item_detail) \n print('successfully created')\n \n\ndef validate_product_name(name):\n if name.isalpha():\n if len(name) > 3 and len(name) <=10:\n return True\n else:\n print('name should be more than 3 and less than 10 letters') \n return False \n else:\n print(\"name should contain only alphanumeric\")\n return False \n\ndef validate_product_price(price):\n if price.isnumeric():\n return True\n else:\n print(\"price should contain only numeric\")\n return False \n\ndef validate_product_quantity(qty):\n if qty.isnumeric():\n return True\n else:\n print(\"qty should contain only numeric\")\n return False \n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .document import ParsedDocument,XmlDocument
from .corenlp import StanfordCoreNLP
from .annotation import KBPAnnMgr,ApfAnnMgr
import os
import codecs
import sys
from . import _list_files

def _sequence_tag_bio(doc):
    outlines = u''
    mentions= doc._annotate
    sentences = doc._text_spans
    for id,sentence in enumerate(sentences):
        tokens= sentence['tokens']
        tok_num = len(tokens)
        mention_tags = ['O']* tok_num
        coref_tags = ['-']*tok_num
        for mention in mentions:
            if mention['sent_id'] != id:
                continue
            mention_tokens= mention['mention_tokens']
            md_tag = mention['md_tag']
            coref_tag = mention['coref_tag']
            tokids=[]
            for token in mention_tokens:
                (sent, tok) = [int(d) for d in token.split('_')]
                if sent != id:
                    print 'mention cross sentence at {}'.format(sentence['origin_text'])
                    continue
                tokids.append(tok)
            for pos,tokid in enumerate(tokids):
                curr_md = md_tag
                curr_coref = coref_tag
                if pos ==0:
                    curr_md = 'B-' + curr_md
                else:
                    curr_md = 'I-' +curr_md
                if pos == 0:
                    curr_coref = '(' + curr_coref
                if pos == len(tokids) -1:
                    curr_coref = curr_coref + ')'
                if pos > 0 and pos < len(tokids) -1:
                    curr_coref = '-'
                if mention_tags[tokid] == 'O':
                    mention_tags[tokid] = curr_md
                    coref_tags[tokid]= curr_coref
        source =[]
        target =[]
        for token,mention,coref in zip(tokens,mention_tags, coref_tags):
            token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),
                            token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),
                            token['ner'].replace(u'#',u'@')]
            if token.has_key(u'comb-word'):
                token_feature.append( token[u'comb-word'].replace(u'#',u'@'))
            source.append('#'.join(token_feature))
            target.append(mention)
        source = u' '.join(source)
        target = u' '.join(target)
        outlines += u'{}|||{} </s>\n'.format(source,target)
    return outlines

def build_tree_tag(mentions, tok_num):
    mentions.sort(cmp = lambda x,y:cmp(x[0], y[0]))
    tag_out=[('X',[],[]) for i in range(tok_num)]
    for mention in mentions:
        (start,end, mtype)= mention
        tag_out[start][1].append('('+mtype)
        tag_out[end][2].append(')'+mtype)
    otags=[]
    for tag in tag_out:
        pre= ' '.join(tag[1]).strip()
        suc =' '.join(tag[2][::-1]).strip()
        if pre != '':
            otags.append(pre)
        otags.append(tag[0])
        if suc != '':
            otags.append(suc)
    otags= ' '.join(otags)
    max_tag_num = max([len(x[1]) for x in tag_out])
    if max_tag_num >1:
        print 'nested tag:{}'.format(otags)

    return otags

def _sequence_tag_x(doc):
    outlines = u''
    mentions= doc._annotate
    sentences = doc._text_spans
    for id,sentence in enumerate(sentences):
        tokens= sentence['tokens']
        tok_num = len(tokens)
        curr_mentions = []
        for mention in mentions:
            if mention['sent_id'] != id:
                continue
            mention_tokens= mention['mention_tokens']
            md_tag = mention['md_tag']

            tok_start= int(mention_tokens[0].split('_')[1])
            tok_end = int(mention_tokens[-1].split('_')[1])
            curr_mentions.append((tok_start,tok_end, md_tag))

        target =build_tree_tag(curr_mentions, tok_num)
        source =[]
        for token in tokens:
            token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),
                            token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),
                            token['ner'].replace(u'#',u'@')]
            if token.has_key(u'comb-word'):
                token_feature.append( token[u'comb-word'].replace(u'#',u'@'))
            source.append('#'.join(token_feature))
        source = u' '.join(source)

        outlines += u'{}|||{} </s>\n'.format(source,target.decode('utf-8'))
    return outlines

#in format 'BIO' will ignore all nested tags,in format 'XX' will build tree sequence
def gen_sequence_tags(json_dir, outfile, fmt='BIO', encoding = 'utf-8'):
    fout= codecs.open(outfile, 'w', encoding= encoding)
    seqtag_func= None
    if fmt == 'BIO':
        seqtag_func= _sequence_tag_bio
    elif fmt =='XX':
        seqtag_func= _sequence_tag_x
    else:
        print 'unknown format {}'.format(fmt)
        return
    files = _list_files(json_dir, '.json')
    for f in files:
        print 'processing {}'.format(f)
        doc = ParsedDocument()
        doc.load(f)
        outlines = seqtag_func(doc)
        fout.write(outlines)
        fout.flush()
    fout.close()
normal
{ "blob_id": "80b8b77498f915a85185f829e8c7d5becdab8068", "index": 9286, "step-1": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom .document import ParsedDocument,XmlDocument\nfrom .corenlp import StanfordCoreNLP\nfrom .annotation import KBPAnnMgr,ApfAnnMgr\nimport os\nimport codecs\nimport sys\nfrom . import _list_files\ndef _sequence_tag_bio(doc):\n outlines = u''\n mentions= doc._annotate\n sentences = doc._text_spans\n for id,sentence in enumerate(sentences):\n tokens= sentence['tokens']\n tok_num = len(tokens)\n mention_tags = ['O']* tok_num\n coref_tags = ['-']*tok_num\n for mention in mentions:\n if mention['sent_id'] != id:\n continue\n mention_tokens= mention['mention_tokens']\n md_tag = mention['md_tag']\n coref_tag = mention['coref_tag']\n tokids=[]\n for token in mention_tokens:\n (sent, tok) = [int(d) for d in token.split('_')]\n if sent != id:\n print 'mention cross sentence at {}'.format(sentence['origin_text'])\n continue\n tokids.append(tok)\n for pos,tokid in enumerate(tokids):\n curr_md = md_tag\n curr_coref = coref_tag\n if pos ==0:\n curr_md = 'B-' + curr_md\n else:\n curr_md = 'I-' +curr_md\n if pos == 0:\n curr_coref = '(' + curr_coref\n if pos == len(tokids) -1:\n curr_coref = curr_coref + ')'\n if pos > 0 and pos < len(tokids) -1:\n curr_coref = '-'\n if mention_tags[tokid] == 'O':\n mention_tags[tokid] = curr_md\n coref_tags[tokid]= curr_coref\n source =[]\n target =[]\n for token,mention,coref in zip(tokens,mention_tags, coref_tags):\n token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),\n token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),\n token['ner'].replace(u'#',u'@')]\n if token.has_key(u'comb-word'):\n token_feature.append( token[u'comb-word'].replace(u'#',u'@'))\n source.append('#'.join(token_feature))\n target.append(mention)\n source = u' '.join(source)\n target = u' '.join(target)\n outlines += u'{}|||{} </s>\\n'.format(source,target)\n return outlines\n\ndef build_tree_tag(mentions, tok_num):\n mentions.sort(cmp = lambda x,y:cmp(x[0], y[0]))\n tag_out=[('X',[],[]) for i in range(tok_num)]\n for mention in mentions:\n (start,end, mtype)= mention\n tag_out[start][1].append('('+mtype)\n tag_out[end][2].append(')'+mtype)\n otags=[]\n for tag in tag_out:\n pre= ' '.join(tag[1]).strip()\n suc =' '.join(tag[2][::-1]).strip()\n if pre != '':\n otags.append(pre)\n otags.append(tag[0])\n if suc != '':\n otags.append(suc)\n otags= ' '.join(otags)\n max_tag_num = max([len(x[1]) for x in tag_out])\n if max_tag_num >1:\n print 'nested tag:{}'.format(otags)\n \n return otags\n \ndef _sequence_tag_x(doc):\n outlines = u''\n mentions= doc._annotate\n sentences = doc._text_spans\n for id,sentence in enumerate(sentences):\n tokens= sentence['tokens']\n tok_num = len(tokens)\n curr_mentions = []\n for mention in mentions:\n if mention['sent_id'] != id:\n continue\n mention_tokens= mention['mention_tokens']\n md_tag = mention['md_tag']\n \n tok_start= int(mention_tokens[0].split('_')[1])\n tok_end = int(mention_tokens[-1].split('_')[1])\n curr_mentions.append((tok_start,tok_end, md_tag))\n \n target =build_tree_tag(curr_mentions, tok_num)\n source =[]\n for token in tokens:\n token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),\n token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),\n token['ner'].replace(u'#',u'@')]\n if token.has_key(u'comb-word'):\n token_feature.append( token[u'comb-word'].replace(u'#',u'@'))\n 
source.append('#'.join(token_feature))\n source = u' '.join(source)\n \n outlines += u'{}|||{} </s>\\n'.format(source,target.decode('utf-8'))\n return outlines \n\n#in format 'BIO' will ignore all nested tags,in format 'XX' will build tree sequence\ndef gen_sequence_tags(json_dir, outfile, fmt='BIO', encoding = 'utf-8'):\n fout= codecs.open(outfile, 'w', encoding= encoding)\n seqtag_func= None\n if fmt == 'BIO':\n seqtag_func= _sequence_tag_bio\n elif fmt =='XX':\n seqtag_func= _sequence_tag_x\n else:\n print 'unknown format {}'.format(fmt)\n return\n files = _list_files(json_dir, '.json')\n for f in files:\n print 'processing {}'.format(f)\n doc = ParsedDocument()\n doc.load(f)\n outlines = seqtag_func(doc)\n fout.write(outlines)\n fout.flush()\n fout.close()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
""" Urls for CAE_Web Audio_Visual app. """ from django.conf.urls import url from . import views app_name = 'cae_web_audio_visual' urlpatterns = [ ]
normal
{ "blob_id": "5debc97e99bbd78b17e545896d718d4b0eac8519", "index": 2430, "step-1": "<mask token>\n", "step-2": "<mask token>\napp_name = 'cae_web_audio_visual'\nurlpatterns = []\n", "step-3": "<mask token>\nfrom django.conf.urls import url\nfrom . import views\napp_name = 'cae_web_audio_visual'\nurlpatterns = []\n", "step-4": "\"\"\"\nUrls for CAE_Web Audio_Visual app.\n\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\n\napp_name = 'cae_web_audio_visual'\nurlpatterns = [\n\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# coding: utf-8

# # Read Bathy data from ERDDAP

# In[ ]:

get_ipython().system(u'conda install basemap --yes')


# In[1]:

import numpy as np
import matplotlib.pyplot as plt
import urllib
import netCDF4
from mpl_toolkits.basemap import Basemap


# In[2]:

# Definine the domain of interest
minlat = 42
maxlat = 45
minlon = -67
maxlon = -61.5
isub = 5

# Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
# using the netCDF output option
base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
query='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon)
url = base_url+query
print url


# In[3]:

# store data in NetCDF file
file='usgsCeSrtm30v6.nc'
urllib.urlretrieve (url, file)


# In[4]:

# open NetCDF data in
nc = netCDF4.Dataset(file)
ncv = nc.variables
print ncv.keys()


# In[5]:

lon = ncv['longitude'][:]
lat = ncv['latitude'][:]
lons, lats = np.meshgrid(lon,lat)
topo = ncv['topo'][:,:]


# In[ ]:

# Create map
m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat,llcrnrlon=minlon, urcrnrlon=maxlon,resolution='h')
fig1 = plt.figure(figsize=(10,8))
cs = m.pcolormesh(lons,lats,topo,cmap=plt.cm.jet,latlon=True)
m.drawcoastlines()
m.drawmapboundary()
plt.title('SMRT30 - Bathymetry/Topography')
cbar = plt.colorbar(orientation='horizontal', extend='both')
cbar.ax.set_xlabel('meters')

# Save figure (without 'white' borders)
plt.savefig('topo.png', bbox_inches='tight')
normal
{ "blob_id": "6d0340a08701b0c4f34e9b833bca27cf455d682d", "index": 827, "step-1": "\n# coding: utf-8\n\n# # Read Bathy data from ERDDAP\n\n# In[ ]:\n\nget_ipython().system(u'conda install basemap --yes')\n\n\n# In[1]:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport urllib\nimport netCDF4\nfrom mpl_toolkits.basemap import Basemap\n\n\n# In[2]:\n\n# Definine the domain of interest\nminlat = 42\nmaxlat = 45\nminlon = -67\nmaxlon = -61.5\nisub = 5\n \n# Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html\n# using the netCDF output option\nbase_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'\nquery='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon)\nurl = base_url+query\nprint url\n\n\n# In[3]:\n\n# store data in NetCDF file\nfile='usgsCeSrtm30v6.nc'\nurllib.urlretrieve (url, file)\n\n\n# In[4]:\n\n# open NetCDF data in \nnc = netCDF4.Dataset(file)\nncv = nc.variables\nprint ncv.keys()\n\n\n# In[5]:\n\nlon = ncv['longitude'][:]\nlat = ncv['latitude'][:]\nlons, lats = np.meshgrid(lon,lat)\ntopo = ncv['topo'][:,:]\n\n\n# In[ ]:\n\n# Create map\nm = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat,llcrnrlon=minlon, urcrnrlon=maxlon,resolution='h')\nfig1 = plt.figure(figsize=(10,8))\ncs = m.pcolormesh(lons,lats,topo,cmap=plt.cm.jet,latlon=True)\nm.drawcoastlines()\nm.drawmapboundary()\nplt.title('SMRT30 - Bathymetry/Topography')\ncbar = plt.colorbar(orientation='horizontal', extend='both')\ncbar.ax.set_xlabel('meters')\n \n# Save figure (without 'white' borders)\nplt.savefig('topo.png', bbox_inches='tight')\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import numpy as np
import sympy as sp
from copy import copy
from typing import Any, get_type_hints, Dict
from inspect import getclosurevars, getsource, getargs
import ast
from ast import parse, get_source_segment
from .numpy import NumPy
from .torch import torch_defs

defines = {}
defines.update(torch_defs)


def check_type(item, target):
    assert item == target


def exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any], ret: Any):
    def get_value(v):
        if isinstance(v, ast.BinOp):
            a = get_value(v.left)
            b = get_value(v.right)
            return a
        elif isinstance(v, ast.Name):
            return loc.get(v.id)
        elif isinstance(v, ast.Call):
            args = [get_value(a) for a in v.args]
            func = loc.get(v.func.id, None) or glob.get(v.func.id, None)
            return func(*args)
        elif isinstance(v, ast.List):
            return [get_value(e) for e in v.elts]
        elif isinstance(v, ast.Constant):
            return v.value
        seg = get_source_segment(source, v)
        return eval(seg, glob, loc)

    for line in body:
        if isinstance(line, ast.Return):
            value = get_value(line.value)
            check_type(value, ret)
        elif isinstance(line, ast.If):
            loc1, loc2 = copy(loc), copy(loc)
            exec_lines(source, line.body, loc1, glob, ret)
            exec_lines(source, line.orelse, loc2, glob, ret)
        elif isinstance(line, ast.Assign):
            value = get_value(line.value)
            t = line.targets
        else:
            exec(get_source_segment(source, line), glob, loc)


def check(func):
    args = getargs(func.__code__)
    hints = get_type_hints(func)
    cv = getclosurevars(func)
    loc_vars = {n: Any for n in args.args}
    ret = hints.pop('return') if 'return' in hints else None
    loc_vars.update(hints)
    glob_vars = {}
    for k, v in cv.globals.items():
        if v is np:
            glob_vars[k] = NumPy()
        else:
            glob_vars[k] = defines.get(v, None) or v
    source = getsource(func)
    f_ast = parse(source).body[0]
    body = f_ast.body
    exec_lines(source, body, loc_vars, glob_vars, ret)
    defines[func] = 1
    return func
normal
{ "blob_id": "430b5ca7212983743cadc36a2ada987bb721174a", "index": 3537, "step-1": "<mask token>\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n", "step-2": "<mask token>\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n", "step-3": "<mask token>\ndefines = {}\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n 
assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n defines[func] = 1\n return func\n", "step-4": "import numpy as np\nimport sympy as sp\nfrom copy import copy\nfrom typing import Any, get_type_hints, Dict\nfrom inspect import getclosurevars, getsource, getargs\nimport ast\nfrom ast import parse, get_source_segment\nfrom .numpy import NumPy\nfrom .torch import torch_defs\ndefines = {}\ndefines.update(torch_defs)\n\n\ndef check_type(item, target):\n assert item == target\n\n\ndef exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any],\n ret: Any):\n\n def get_value(v):\n if isinstance(v, ast.BinOp):\n a = get_value(v.left)\n b = get_value(v.right)\n return a\n elif isinstance(v, ast.Name):\n return loc.get(v.id)\n elif isinstance(v, ast.Call):\n args = [get_value(a) for a in v.args]\n func = loc.get(v.func.id, None) or glob.get(v.func.id, None)\n return func(*args)\n elif isinstance(v, ast.List):\n return [get_value(e) for e in v.elts]\n elif isinstance(v, ast.Constant):\n return v.value\n seg = get_source_segment(source, v)\n return eval(seg, glob, loc)\n for line in body:\n if isinstance(line, ast.Return):\n value = get_value(line.value)\n check_type(value, ret)\n elif isinstance(line, ast.If):\n loc1, loc2 = copy(loc), copy(loc)\n exec_lines(source, line.body, loc1, glob, ret)\n exec_lines(source, line.orelse, loc2, glob, ret)\n elif isinstance(line, ast.Assign):\n value = get_value(line.value)\n t = line.targets\n else:\n exec(get_source_segment(source, line), glob, loc)\n\n\ndef check(func):\n args = getargs(func.__code__)\n hints = get_type_hints(func)\n cv = getclosurevars(func)\n loc_vars = {n: Any for n in args.args}\n ret = hints.pop('return') if 'return' in hints else None\n loc_vars.update(hints)\n glob_vars = {}\n for k, v in cv.globals.items():\n if v is np:\n glob_vars[k] = NumPy()\n else:\n glob_vars[k] = defines.get(v, None) or v\n source = getsource(func)\n f_ast = parse(source).body[0]\n body = f_ast.body\n exec_lines(source, body, loc_vars, glob_vars, ret)\n 
defines[func] = 1\n return func\n", "step-5": null, "step-ids": [ 3, 4, 5, 6 ] }
[ 3, 4, 5, 6 ]
import sys, time from machine import Pin print('LOAD: blinker.py') def blink_connected_to_wifi(pin=23): _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]]) def blink_not_connected_to_wifi(pin=23): _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, 0.5, 0.5]]) # pin - the pin, connected to LED # pattern - the array of items: [blink_count, on-period, off-period] def _blink_pattern(pin, pattern): p = Pin(pin, Pin.OUT) try: for item in pattern: for j in range(item[0]): p.value(1) time.sleep(item[1]) p.value(0) time.sleep(item[2]) except: p.value(0) Pin(pin, Pin.IN)
normal
{ "blob_id": "c0bd060990d00ab50c9f2d3060b7f975ff16e1ab", "index": 4105, "step-1": "<mask token>\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\n<mask token>\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n", "step-2": "<mask token>\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, \n 0.5, 0.5]])\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n", "step-3": "<mask token>\nprint('LOAD: blinker.py')\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, \n 0.5, 0.5]])\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n", "step-4": "import sys, time\nfrom machine import Pin\nprint('LOAD: blinker.py')\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, \n 0.5, 0.5]])\n\n\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n", "step-5": "import sys, time\nfrom machine import Pin\n\nprint('LOAD: blinker.py')\n\n\ndef blink_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[3, 0.5, 0.5], [4, 0.2, 0.2]])\n\n\ndef blink_not_connected_to_wifi(pin=23):\n _blink_pattern(pin, [[2, 0.2, 0.2], [1, 0.5, 0.5], [2, 0.2, 0.2], [1, 0.5, 0.5]])\n\n# pin - the pin, connected to LED\n# pattern - the array of items: [blink_count, on-period, off-period]\ndef _blink_pattern(pin, pattern):\n p = Pin(pin, Pin.OUT)\n try:\n for item in pattern:\n for j in range(item[0]):\n p.value(1)\n time.sleep(item[1])\n p.value(0)\n time.sleep(item[2])\n except:\n p.value(0)\n Pin(pin, Pin.IN)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import itertools import numpy as np import pandas as pd from scipy.sparse import coo_matrix def merge_and_split(inputs, labels): df = inputs.reset_index().merge(labels.reset_index(), on='utterance', how='inner').set_index('utterance') return df.feat, df.label def list_to_sparse(inputs): """Convert list of lists into scipy coo matrix. """ data = list(itertools.chain(*inputs)) row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in enumerate(inputs)])) col = list(itertools.chain(*[range(len(x)) for x in inputs])) s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for x in inputs]))) return s class BatchGenerator(object): def __init__(self, data, batch_size=1): self.inputs, self.labels = data self.batch_size = batch_size self.data_length = len(self.inputs) self.sequence_length = np.array([x.shape[0] for x in self.inputs]) def next_batch(self): self._suffle() start = 0 end = 0 batch_size, data_length = self.batch_size, self.data_length while end != data_length: end += batch_size end = data_length if end >= data_length else end yield self._get(start, end) start = end def _suffle(self): permutation = np.random.permutation(self.data_length) self.inputs = self.inputs[permutation] self.labels = self.labels[permutation] self.sequence_length = self.sequence_length[permutation] def _get(self, start, end): sequence_length = self.sequence_length[start:end] batch_sequence_length = np.max(sequence_length) inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length - len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]] ) labels = list_to_sparse(self.labels[start:end]) return inputs, labels, sequence_length
normal
{ "blob_id": "912928cea0f96e601eecfcb6dba695ef26a3c6e2", "index": 9618, "step-1": "<mask token>\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n", "step-2": "<mask token>\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n", "step-3": "<mask token>\n\n\ndef merge_and_split(inputs, labels):\n df = inputs.reset_index().merge(labels.reset_index(), on='utterance',\n how='inner').set_index('utterance')\n return df.feat, df.label\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = 
np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n", "step-4": "import itertools\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import coo_matrix\n\n\ndef merge_and_split(inputs, labels):\n df = inputs.reset_index().merge(labels.reset_index(), on='utterance',\n how='inner').set_index('utterance')\n return df.feat, df.label\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n", "step-5": null, "step-ids": [ 5, 6, 7, 8 ] }
[ 5, 6, 7, 8 ]
""" Created on Fri Jan 07 20:53:58 2022 @author: Ankit Bharti """ from unittest import TestCase, main from cuboid_volume import * class TestCuboid(TestCase): def test_volume(self): self.assertAlmostEqual(cuboid_volume(2), 8) self.assertAlmostEqual(cuboid_volume(1), 1) self.assertAlmostEqual(cuboid_volume(0), 0) def test_input_value(self): self.assertRaises(TypeError, cuboid_volume, 'ank') def test_addition(self): self.assertEqual(add(3, 4), 7) self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2) def test_addition_input_value(self): self.assertRaises(TypeError, add, 'ank', 6) if __name__ == '__main__': main()
normal
{ "blob_id": "394f835064d070a30040b6f01b25b6f0e005827d", "index": 5010, "step-1": "<mask token>\n\n\nclass TestCuboid(TestCase):\n <mask token>\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n <mask token>\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nfrom unittest import TestCase, main\nfrom cuboid_volume import *\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "\"\"\"\nCreated on Fri Jan 07 20:53:58 2022\n@author: Ankit Bharti\n\n\"\"\"\n\n\nfrom unittest import TestCase, main\nfrom cuboid_volume import *\n\n\nclass TestCuboid(TestCase):\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
people = 20
cats = 30
dogs = 15

if people < cats:
    print("Too many cats")
elif people > cats:
    print("Not many cats")
else:
    print("We cannot decide")
normal
{ "blob_id": "0465e33d65c2ce47ebffeec38db6908826bf4934", "index": 299, "step-1": "<mask token>\n", "step-2": "<mask token>\nif people < cats:\n print('Too many cats')\nelif people > cats:\n print('Not many cats')\nelse:\n print('we cannnot decide')\n", "step-3": "people = 20\ncats = 30\ndogs = 15\nif people < cats:\n print('Too many cats')\nelif people > cats:\n print('Not many cats')\nelse:\n print('we cannnot decide')\n", "step-4": "people = 20\ncats = 30\ndogs = 15\n\nif people < cats:\n print(\"Too many cats\")\nelif people > cats:\n print(\"Not many cats\")\nelse:\n print(\"we cannnot decide\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
AuthorPath = 'data/Author.csv' PaperPath = 'buff/Paper.TitleCut.csv' PaperAuthorPath = 'data/PaperAuthor.csv' AffilListPath = 'buff/AffilList2.csv' StopwordPath = 'InternalData/en.lst'
normal
{ "blob_id": "690e7cc9047b3a445bf330524df52e2b359f1f13", "index": 958, "step-1": "<mask token>\n", "step-2": "AuthorPath = 'data/Author.csv'\nPaperPath = 'buff/Paper.TitleCut.csv'\nPaperAuthorPath = 'data/PaperAuthor.csv'\nAffilListPath = 'buff/AffilList2.csv'\nStopwordPath = 'InternalData/en.lst'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
# (1) Obtain your values here (https://core.telegram.org/api/obtaining_api_id) api_id = 000000 api_hash = '00000000000000000000000' phone = '+000000000000' username = 'theone' project_id = 000000000
normal
{ "blob_id": "a5646a5d42dbf6e70e9d18f28513ee2df68a28b1", "index": 6886, "step-1": "<mask token>\n", "step-2": "api_id = 0\napi_hash = '00000000000000000000000'\nphone = '+000000000000'\nusername = 'theone'\nproject_id = 0\n", "step-3": "# (1) Obtain your values here (https://core.telegram.org/api/obtaining_api_id)\napi_id = 000000\napi_hash = '00000000000000000000000'\n\nphone = '+000000000000'\nusername = 'theone'\n\nproject_id = 000000000\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import os from flask import Flask, request, redirect, url_for, render_template, send_from_directory from werkzeug.utils import secure_filename import chardet as chardet import pandas as pd UPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/' DOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/' ALLOWED_EXTENSIONS = {'csv', 'txt'} app = Flask(__name__, static_url_path="/static") DIR_PATH = os.path.dirname(os.path.realpath(__file__)) app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER # limit upload size upto 8mb app.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024 def allowed_file(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @app.route('/', methods=['GET', 'POST']) def index(): if request.method == 'POST': if 'file' not in request.files: print('No file attached in request') return redirect(request.url) file = request.files['file'] if file.filename == '': print('No file selected') return redirect(request.url) if file and allowed_file(file.filename): filename = secure_filename(file.filename) file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename), filename) return redirect(url_for('uploaded_file', filename=filename)) return render_template('index.html') def process_file(path, filename): check_encoding(path, filename) # with open(path, 'a') as f: # f.write("\nAdded processed content") def check_encoding(path, filename): with open(path, 'rb') as rawdata: result = chardet.detect(rawdata.read(10000)) df = pd.read_csv(path, encoding=result['encoding']) GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit('.', 1)[0] + '.xlsx') df.to_excel(GFG, index=False, encoding='utf-8') #output_stream = open(app.config['DOWNLOAD_FOLDER'] + 'output.xlsx', 'wb') #GFG.write(output_stream) GFG.save() @app.route('/uploads/<filename>') def uploaded_file(filename): return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.rsplit('.', 1)[0] + '.xlsx', as_attachment=True) if __name__ == '__main__': port = int(os.environ.get("PORT", 5000)) app.run(host='0.0.0.0', port=port)
normal
{ "blob_id": "eb17de8828a600832253c4cfeeb91503b6876dd7", "index": 9963, "step-1": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\[email protected]('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\[email protected]('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n", "step-3": "<mask token>\nUPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'\nDOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'\nALLOWED_EXTENSIONS = {'csv', 'txt'}\napp = Flask(__name__, static_url_path='/static')\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\[email protected]('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n", "step-4": "import os\nfrom flask import Flask, request, redirect, url_for, render_template, send_from_directory\nfrom werkzeug.utils import secure_filename\nimport chardet as chardet\nimport pandas as pd\nUPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'\nDOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'\nALLOWED_EXTENSIONS = {'csv', 'txt'}\napp = Flask(__name__, static_url_path='/static')\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename\n ), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit(\n '.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n GFG.save()\n\n\[email protected]('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.\n rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n", "step-5": "import os\nfrom flask import Flask, request, redirect, url_for, render_template, send_from_directory\nfrom werkzeug.utils import secure_filename\nimport chardet as chardet\nimport pandas as pd\n\nUPLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/uploads/'\nDOWNLOAD_FOLDER = os.path.dirname(os.path.abspath(__file__)) + '/downloads/'\nALLOWED_EXTENSIONS = {'csv', 'txt'}\n\napp = Flask(__name__, static_url_path=\"/static\")\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER\n# limit upload size upto 8mb\napp.config['MAX_CONTENT_LENGTH'] = 8 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n if 'file' not in request.files:\n print('No file attached in request')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n process_file(os.path.join(app.config['UPLOAD_FOLDER'], filename), filename)\n return redirect(url_for('uploaded_file', filename=filename))\n return render_template('index.html')\n\n\ndef process_file(path, filename):\n check_encoding(path, filename)\n # with open(path, 'a') as f:\n # f.write(\"\\nAdded processed content\")\n\n\ndef check_encoding(path, filename):\n with open(path, 'rb') as rawdata:\n result = chardet.detect(rawdata.read(10000))\n df = pd.read_csv(path, encoding=result['encoding'])\n GFG = pd.ExcelWriter(app.config['DOWNLOAD_FOLDER'] + filename.rsplit('.', 1)[0] + '.xlsx')\n df.to_excel(GFG, index=False, encoding='utf-8')\n #output_stream = open(app.config['DOWNLOAD_FOLDER'] + 'output.xlsx', 'wb')\n #GFG.write(output_stream)\n GFG.save()\n\n \n\[email protected]('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['DOWNLOAD_FOLDER'], filename.rsplit('.', 1)[0] + '.xlsx', as_attachment=True)\n\n\nif __name__ == '__main__':\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port)", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
import zipfile, re f = zipfile.ZipFile("channel.zip") num = '90052' comments = [] while True: content = f.read(num + ".txt").decode("utf-8") print(content) comments.append(f.getinfo(num + ".txt").comment.decode("utf-8")) match = re.search("Next nothing is (\d+)", content) if match == None: break num = match.group(1) print("".join(comments)) url = "http://www.pythonchallenge.com/pc/def/hockey.html" print(url) # look at the letters that make the ascii art : they are : O makes h, x makes o, g makes k, e makes e, n makes y print("http://www.pythonchallenge.com/pc/def/oxygen.html")
normal
{ "blob_id": "b883e63c70f3dfeac3294989fab93c1331b6329c", "index": 7990, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile True:\n content = f.read(num + '.txt').decode('utf-8')\n print(content)\n comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))\n match = re.search('Next nothing is (\\\\d+)', content)\n if match == None:\n break\n num = match.group(1)\nprint(''.join(comments))\n<mask token>\nprint(url)\nprint('http://www.pythonchallenge.com/pc/def/oxygen.html')\n", "step-3": "<mask token>\nf = zipfile.ZipFile('channel.zip')\nnum = '90052'\ncomments = []\nwhile True:\n content = f.read(num + '.txt').decode('utf-8')\n print(content)\n comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))\n match = re.search('Next nothing is (\\\\d+)', content)\n if match == None:\n break\n num = match.group(1)\nprint(''.join(comments))\nurl = 'http://www.pythonchallenge.com/pc/def/hockey.html'\nprint(url)\nprint('http://www.pythonchallenge.com/pc/def/oxygen.html')\n", "step-4": "import zipfile, re\nf = zipfile.ZipFile('channel.zip')\nnum = '90052'\ncomments = []\nwhile True:\n content = f.read(num + '.txt').decode('utf-8')\n print(content)\n comments.append(f.getinfo(num + '.txt').comment.decode('utf-8'))\n match = re.search('Next nothing is (\\\\d+)', content)\n if match == None:\n break\n num = match.group(1)\nprint(''.join(comments))\nurl = 'http://www.pythonchallenge.com/pc/def/hockey.html'\nprint(url)\nprint('http://www.pythonchallenge.com/pc/def/oxygen.html')\n", "step-5": "import zipfile, re\n\nf = zipfile.ZipFile(\"channel.zip\")\nnum = '90052'\ncomments = []\n\nwhile True:\n content = f.read(num + \".txt\").decode(\"utf-8\")\n print(content)\n comments.append(f.getinfo(num + \".txt\").comment.decode(\"utf-8\"))\n match = re.search(\"Next nothing is (\\d+)\", content)\n if match == None:\n break\n num = match.group(1)\nprint(\"\".join(comments))\n\nurl = \"http://www.pythonchallenge.com/pc/def/hockey.html\"\nprint(url)\n# look at the letters that make the ascii art : they are : O makes h, x makes o, g makes k, e makes e, n makes y\n\nprint(\"http://www.pythonchallenge.com/pc/def/oxygen.html\")", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import turtle red = range(4); for i in red: turtle.forward(200) turtle.left(90) turtle.done()
normal
{ "blob_id": "38fceb57977cb792be1a63e8571cd222facdf656", "index": 1142, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in red:\n turtle.forward(200)\n turtle.left(90)\nturtle.done()\n", "step-3": "<mask token>\nred = range(4)\nfor i in red:\n turtle.forward(200)\n turtle.left(90)\nturtle.done()\n", "step-4": "import turtle\nred = range(4)\nfor i in red:\n turtle.forward(200)\n turtle.left(90)\nturtle.done()\n", "step-5": "import turtle\n\nred = range(4);\nfor i in red:\n\tturtle.forward(200)\n\tturtle.left(90)\n\nturtle.done()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from dynamic_rest import viewsets from django.shortcuts import render import os from rest_framework import permissions from rest_framework.response import Response from rest_framework.permissions import AllowAny from rest_framework.decorators import api_view, permission_classes, detail_route, list_route from rest_framework import mixins from rdkit import Chem import random from Bio.PDB.PDBParser import PDBParser import time from Bio.PDB.PDBIO import PDBIO import residues_scanning_command import prep_dock import threading from rosetta_workflow_all_scripts import rosetta_protein_prep, get_cst_file, change_pos, design_analysis import rosetta_workflow_all_scripts from django.utils.datastructures import MultiValueDictKeyError import multiprocessing as mul import time from models import SubmitParamter, Onlinedock from django.core.files import File from email.mime.text import MIMEText from email.header import Header from smtplib import SMTP_SSL from email.mime.multipart import MIMEMultipart from email import encoders from email.message import Message from email.mime.base import MIMEBase from dynamic_rest import viewsets import serializers from rest_framework.parsers import JSONParser from polls.serializers import SubmitParamsSerializer, OnlinedockSerializer from django.http import JsonResponse import zipfile import tempfile def send_file_zipped(the_file, recipients, email_content, sender='[email protected]'): zf = tempfile.TemporaryFile(prefix='mail', suffix='zip') zip = zipfile.ZipFile(zf, 'w') zip.write(the_file) zip.close() zf.seek(0) ### Create the message themsg = MIMEMultipart() themsg['Subject'] = 'File %s' % the_file themsg['To'] = ', '.join(recipients) themsg['From'] = sender themsg.attach(MIMEText(email_content, 'html', 'utf-8')) themsg.preamble = 'I am not using a MIME-aware mail reader.\n' msg = MIMEBase('application', 'zip') msg.set_payload(zf.read()) encoders.encode_base64(msg) msg.add_header('Content-Disposition', 'attachment', filename=the_file) themsg.attach(msg) themsg = themsg.as_string() ### Send the message import smtplib host_server = 'smtp.qq.com' sender_mail_addr = '[email protected]' pwd = 'utxfxpzcpsnzbbcc' smtp = SMTP_SSL(host_server) smtp.set_debuglevel(1) smtp.ehlo(host_server) smtp.login(sender_mail_addr, pwd) smtp.sendmail(sender, recipients, themsg) smtp.quit() # smtp = smtplib.SMTP() # smtp.connect() # smtp.sendmail(sender, recipients, themsg) # smtp.close() def get_pov_value(file): f = open(file) lines = f.readlines() f.close() for line in lines: if line.startswith('1'): value = float(line.split('|')[1].strip()) return value def main(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr): """ :param job_name: :param mutation_radius: :param pov_radius: :param pH: :param mutation_info_list: :param protein: :param ligand_name: :param ligand_resseq: :param chain_id: :return: """ current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) print current_time log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log') wild_protein_name = protein.name # job_dir = os.path.join(log_dir, job_name + '_' + current_time) job_dir = os.path.join(log_dir, job_name) if not os.path.exists(job_dir): os.mkdir(job_dir) wild_protein_file = os.path.join(job_dir, wild_protein_name) protein_str = protein.read() prep_dock.save_to_file(wild_protein_file, protein_str) prepare_protein_name = wild_protein_name.split('.')[0] + '_prep.pdb' ### 
Prepare protein
    prep_dock.prep_protein(wild_protein_name, prepare_protein_name, job_dir, pH)

    ### make mutation
    prep_dock.get_mut_protein(job_dir, job_name, mut_info_list=mutation_info_list, mutation_radius=mutation_radius, prepare_protein_name=prepare_protein_name)
    prepare_protein = os.path.join(job_dir, prepare_protein_name)
    mutation_protein_name = job_name + '_mut-2.pdb'
    mutation_protein = os.path.join(job_dir, mutation_protein_name)

    ### prep_pov
    prep_dock.get_pro_lig_povin((prepare_protein_name, prepare_protein, chain_id, ligand_resseq, ligand_name), pov_radius, protein_type='prep')
    prep_pov = os.path.join(job_dir, 'pov', 'prep', 'prep.log')

    ### mut_pov
    prep_dock.get_pro_lig_povin((mutation_protein_name, mutation_protein, chain_id, ligand_resseq, ligand_name), pov_radius, protein_type='mut')
    mut_pov = os.path.join(job_dir, 'pov', 'mut', 'mut.log')

    ### plip
    # prep_dock.get_plip_file(prepare_protein, mutation_protein)

    ### TMalign
    # prep_dock.TMalign(prepare_protein, mutation_protein)

    onlinedock, create = Onlinedock.objects.get_or_create(job_name=job_name)
    prep_protein_file = File(open(prepare_protein))
    mut_protein_file = File(open(mutation_protein))
    prep_pov_file = File(open(prep_pov))
    mut_pov_file = File(open(mut_pov))
    prep_pov_value = get_pov_value(prep_pov)
    mut_pov_value = get_pov_value(mut_pov)
    onlinedock.prep_protein.save(prepare_protein_name, prep_protein_file)
    onlinedock.mut_protein.save(mutation_protein_name, mut_protein_file)
    onlinedock.prep_pov.save('prep.log', prep_pov_file)
    onlinedock.mut_pov.save('mut.log', mut_pov_file)
    onlinedock.prep_pov_value = prep_pov_value
    onlinedock.mut_pov_value = mut_pov_value
    onlinedock.save()
    os.chdir(job_dir)
    os.system('zip related_info ' + prepare_protein + ' ' + mutation_protein + ' ' + prep_pov + ' ' + mut_pov)
    email_content = "Welcome to Jianping Lin Group server~~"
    print(os.path.curdir)
    related_info = os.path.join(os.path.curdir, 'related_info.zip')
    send_file_zipped(related_info, email_addr, email_content=email_content)


def test():
    job_dir = '/home/jianping/django_test/longge/polls/log/1111/pov'
    # job_id = 0


@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def online_docking(request):
    # current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    # print current_time
    # log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')
    job_name = request.data['job_name']
    mutation_radius = request.data['mutation_radius']  ### mutation radius
    pov_radius = str(request.data['pov_radius'])  ### pov radius
    pH = request.data['pH']
    mutation_info_list = request.data['mutation_info_list']  ### [chain, position, residue, ]
    protein = request.data['protein_file']
    ligand_name = request.data['ligand_name']
    ligand_resseq = int(request.data['ligand_resseq'])
    chain_id = request.data['chain_id']
    email_addr = request.data['email_addr']
    # main(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr)
    t = threading.Thread(target=main, args=(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr))
    t.setDaemon(False)
    t.start()
    return Response('Congratulations, you have submitted successfully!!!')


@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def prepare_protein(request):
    job_name = request.data['job_name']
    protein = request.data['protein']
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    if not 
os.path.exists(work_dir): os.mkdir(work_dir) protein_name, protein_name_pure = protein.name, protein.name.split('.')[0] local_protein_file = os.path.join(work_dir, protein_name) protein_str = protein.read() prep_dock.save_to_file(local_protein_file, protein_str) os.chdir(work_dir) protein_renumber_name, protein_renumber = protein_name_pure + '_renumber', protein_name_pure + '_renumber.pdb' os.system( 'python ../../rosetta_workflow_all_scripts/PDB_renumber.py -i ' + protein_name + ' -a -r > ' + protein_renumber_name + '.pdb') params, create = SubmitParamter.objects.get_or_create(job_name=job_name) prt = File(open(local_protein_file)) prt_renumber = File(open(protein_renumber)) params.protein_file.save(protein_name, prt) params.protein_renumber_file.save(protein_renumber, prt_renumber) params.save() # return Response(params) serializer = SubmitParamsSerializer(params) return JsonResponse(serializer.data, safe=False) # return Response('Successfully') @api_view(['POST']) @permission_classes([permissions.AllowAny]) def first_step(request): job_name = request.data['job_name'] # protein = request.data['protein'] ligand = request.data['ligand'] other_ligands = request.data['other_ligands'] ### ['A','215','MG','A','218','HOH','A','217','ATP'] other_ligands = other_ligands.split('[')[1].split(']')[0].split(',') other_ligands = [str(i) for i in other_ligands] res_chain = request.data['res_chain'] # 'A' res_ligand_chain = request.data['res_ligand_chain'] ## 'A' res_ligand_ID = request.data['res_ligand_ID'] ### '216' res_ligand_name = request.data['res_ligand_name'] ### 'PRP' # design_ligand_name = request.data['design_ligand_name'] ### 'ABC' ### third step ### # CST_A_chain_name = request.data['CST_A_chain_name'] current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) print current_time job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design') work_dir = os.path.join(job_dir, job_name) if not os.path.exists(work_dir): os.mkdir(work_dir) # protein_name, protein_name_pure = protein.name, protein.name.split('.')[0] # local_protein_file = os.path.join(work_dir, protein_name) # protein_str = protein.read() # prep_dock.save_to_file(local_protein_file, protein_str) ligand_name, ligand_name_pure = ligand.name, ligand.name.split('.')[0] local_ligand_file = os.path.join(work_dir, ligand_name) ligand_str = ligand.read() prep_dock.save_to_file(local_ligand_file, ligand_str) os.chdir(work_dir) # protein_renumber_name, protein_renumber = protein_name_pure + '_renumber', protein_name_pure + '_renumber.pdb' # os.system('python ../../rosetta_workflow_all_scripts/PDB_renumber.py -i ' + protein_name + ' -a -r > ' + protein_renumber_name + '.pdb') os.system('python ../../rosetta_workflow_all_scripts/design_ligand_prep.py ' + ligand_name) while True: if os.path.exists(ligand_name_pure+'.params'): break os.system('cp ../../rosetta_workflow_all_scripts/match.flags ./') os.system('cp ../../rosetta_workflow_all_scripts/match_grid.flags ./') for filename in os.listdir(work_dir): if filename.endswith('renumber.pdb'): protein_renumber_name = filename.split('.pdb')[0] protein_renumber = filename break prep_pdb, prep_pdb_pure = protein_renumber_name + '_prep.pdb', protein_renumber_name + '_prep' rosetta_protein_prep.prep_protein(protein_renumber, prep_pdb, res_chain, './') rosetta_protein_prep.get_ligand(prep_pdb, res_ligand_chain, res_ligand_ID, res_ligand_name) ### my code ### step = 3 other_ligands_class_list = [other_ligands[i: i+step] for i in range(0, len(other_ligands), step)] 
os.system('cp ' + protein_renumber_name + '_chain' + res_chain + '.pdb combi_ligands.pdb') if len(other_ligands) < 3: print 'There are no ligands that need to be retained' # os.system('cp ' + protein_renumber_name + '_chain' + res_chain + '.pdb combi_ligands.pdb') else: i = 0 for cls in other_ligands_class_list: combi_name = '_'.join(cls) print combi_name rosetta_protein_prep.get_ligand(protein_renumber, cls[0], cls[1], cls[2]) last_out_name = protein_renumber_name + '_chain' + combi_name + '.pdb' last_out_name_mol2 = protein_renumber_name + '_chain' + combi_name + '.mol2' rosetta_protein_prep.combi_pdb('combi_ligands.pdb', last_out_name) if cls[2] != 'HOH' and len(cls[2]) == 3: i += 1 os.system('obabel -ipdb ' + last_out_name + ' -omol2 -O ' + last_out_name_mol2) os.system('python /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/scripts/python/public/molfile_to_params.py ' + last_out_name_mol2 + '-n LG' + str(i)) os.system("sed -i '/^TER/c'TER'' combi_ligands.pdb") rosetta_protein_prep.get_grid('../../rosetta_workflow_all_scripts/match_grid.flags', prep_pdb_pure, res_chain, res_ligand_chain, res_ligand_ID, res_ligand_name) rosetta_protein_prep.get_match_flags('../../rosetta_workflow_all_scripts/match.flags', res_chain, 'ABC', prep_pdb_pure, ligand_name_pure) os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/gen_lig_grids.linuxgccrelease -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database @match_grid_out.flags') os.system('cp ' + protein_renumber + ' ./renumber.pdb') ### update database ### # params, created = SubmitParamter.objects.get_or_create( # job_name=job_name, # other_ligands=other_ligands, # res_chain=res_chain, # res_ligand_chain=res_ligand_chain, # res_ligand_name=res_ligand_name # ) params = SubmitParamter.objects.get(job_name=job_name) params.other_ligands = other_ligands params.res_chain = res_chain params.res_ligand_chain = res_ligand_chain params.res_ligand_name = res_ligand_name # prt = File(open(local_protein_file)) lgd = File(open(local_ligand_file)) # prt_renumber = File(open(protein_renumber)) ligand_params_file = File(open(ligand_name_pure+'.params')) pos_file = os.path.join('./inputs', prep_pdb_pure+'_chain'+res_chain+'.pdb_0.pos') pos_file_name = prep_pdb_pure+'_chain'+res_chain+'.pdb_0.pos' inputs_pos_file = File(open(pos_file)) # params.protein_file.save(protein_name, prt) params.ligand_file.save(ligand_name, lgd) # params.protein_renumber_file.save(protein_renumber, prt_renumber) params.ligand_params_file.save(ligand_name_pure+'.params', ligand_params_file) params.inputs_pos_file.save(pos_file_name, inputs_pos_file) params.save() serializer = SubmitParamsSerializer(params) return JsonResponse(serializer.data, safe=False) # return Response('Successful') @api_view(['POST']) @permission_classes([permissions.AllowAny]) def second_step(request): job_name = request.data['job_name'] constrain_info = request.data['constrain_info'] ### A:216:PRP:O2B:PB:O3A:A:131:ASP:OD2:CG:CB-O2B:PB:O3A-0.20:10.0:10.0:10.0:10.0:10.0-100.0:60.0:60.0:60.0:60.0:60.0-0:360.0:360.0:360.0:360.0:360.0-1:1:1:1:1:1, or A:216:PRP:O2B:PB:O3A:A:131:ASP:OD2:CG:CB-type:OH cat_ID = request.data['cat_ID'] # cst1 = request.data['cst1'] # cst2 = request.data['cst2'] # cst3 = request.data['cst3'] # three_atoms = request.data['three_atoms'] ### O2B:PB:O3A, type:OH # CST_A_chain_name = request.data['CST_A_chain_name'] ### 'A' # CST_A_residue_ID = int(request.data['CST_A_residue_ID']) ### '216' # 
    # CST_A_residue_name = request.data['CST_A_residue_name'] ### 'PRP'
    # Atom_A1 = request.data['Atom_A1'] ### 'O2B'
    # Atom_A2 = request.data['Atom_A2'] ### 'PB'
    # Atom_A3 = request.data['Atom_A3'] ### 'O3A'
    # CST_B_chain_name = request.data['CST_B_chain_name'] ### 'A'
    # CST_B_residue_ID = int(request.data['CST_B_residue_ID']) ### '131'
    # CST_B_residue_name = request.data['CST_B_residue_name'] ### 'ASP'
    # Atom_B1 = request.data['Atom_B1'] ### 'OD2'
    # Atom_B2 = request.data['Atom_B2'] ### 'CG'
    # Atom_B3 = request.data['Atom_B3'] ### 'CB'
    renumber_pdb = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design', job_name, 'renumber.pdb')
    work_dir = os.path.dirname(renumber_pdb)
    os.chdir(work_dir)

    ### my code ###
    # _______________________________________________________________
    constrain_info_list = [cst.split('-') for cst in constrain_info.split(',') if cst != '']
    # for constrain_info in constrain_info_list:
    #     if len(constrain_info) == 2:

    parse = PDBParser(PERMISSIVE=1)
    structure = parse.get_structure('renumber.pdb', renumber_pdb)
    w = open('match.cst', 'w')
    w.write('# cst constraint descriptor for renumber.pdb' + '\n\n\n')
    w.write('# NOTE\n\n\n')

    for idx, cst_info in enumerate(constrain_info_list):
        cst_result = get_cst_file.measure_dist_angle_dihe_new(structure, idx, cst_info)
        w.writelines(cst_result)
    w.close()

    # get_cst_file.measure_dist_angle_dihe(renumber_pdb, 'renumber.pdb', constrain_info_list, 'match.cst')
    # ____________________________________________________________

    # get_cst_file.measure_dist_angle_dihe(renumber_pdb, 'renumber.pdb', [(CST_A_chain_name, CST_A_residue_ID, CST_A_residue_name,
    #                                                                      Atom_A1, Atom_A2, Atom_A3, CST_B_chain_name,
    #                                                                      CST_B_residue_ID, CST_B_residue_name, Atom_B1,
    #                                                                      Atom_B2, Atom_B3), ], 'match.cst')
    os.system('cp match.cst ./inputs')

    inputs_dir = os.path.join(work_dir, 'inputs')
    os.chdir(inputs_dir)
    for filename in os.listdir(inputs_dir):
        if filename.endswith('_0.pos'):
            pos = os.path.join(inputs_dir, filename)
            os.system('cp ' + pos + ' ./pos.bk')
            change_pos.change_pos(filename, cat_ID)
    params = SubmitParamter.objects.get(job_name=job_name)
    params.constrain_info = constrain_info
    params.cat_ID = cat_ID
    match_cst_file = File(open('match.cst'))
    params.match_cst_file.save('match.cst', match_cst_file)
    params.save()
    # for filename in os.listdir(inputs_dir):
    #     if filename.endswith('.params'):
    #         os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/CstfileToTheozymePDB.linuxgccrelease -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database -extra_res_fa ' + filename + ' -match:geometric_constraint_file match.cst')
    return Response('Successful')


@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def third_step(request):
    job_name = request.data['job_name']
    # user_specialized_cst_file = request.data['user_specialized_cst_file']
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    os.chdir(work_dir)
    # if user_specialized_cst_file:
    #     cst_str = user_specialized_cst_file.read()
    #     user_defined_cst_file = os.path.join(work_dir, 'inputs', 'match.cst')
    #     prep_dock.save_to_file(user_defined_cst_file, cst_str)
    try:
        cst_file = request.data['cst_file']
        cst_str = cst_file.read()
        user_defined_cst_file = os.path.join(work_dir, 'inputs', 'match.cst')
        prep_dock.save_to_file(user_defined_cst_file, cst_str)
        params = SubmitParamter.objects.get(job_name=job_name)
        new_cst_file = File(open(user_defined_cst_file))
        params.user_defined_cst_file.save('match.cst', new_cst_file)
        params.save()
    except MultiValueDictKeyError:
        pass
    try:
        os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/match.linuxgccrelease @match_out.flags -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database')
        # params = SubmitParamter.objects.get(job_name=job_name)
        # UM_pdb_list = []
        # for filename in os.listdir(os.path.join(work_dir, 'inputs')):
        #     if filename.startswith('UM'):
        #         file = os.path.join(work_dir, 'inputs', filename)
        #         UM_pdb = File(open(file))
        UM_pdb_list = [filename for filename in os.listdir(os.path.join(work_dir, 'inputs')) if filename.startswith('UM')]
        params.UM_pdb_count = len(UM_pdb_list)
        params.save()
        # return Response('Successful, there are {} UM***.pdb'.format(len(UM_pdb_list)))
        serializer = SubmitParamsSerializer(params)
        return JsonResponse(serializer.data, safe=False)
    except:
        return Response('Failed, please check the constraint file and submit again !!!')


from functools import wraps
def timethis(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print(func.__name__, end - start)
        return result
    return wrapper


@timethis
def design_comand(match_file):
    command = "/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/enzyme_design.linuxgccrelease @design_out.flags -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database -s " + match_file + " -out:file:o " + match_file + "_DE.out > " + match_file + "_design.log"
    os.system(command)


# def get_design_params(ligand_name, params=('6.0', '8.0', '10.0', '12.0', '5')):
#     # """
#     # :param ligand_name:
#     # :param params: (6.0, 8.0, 10, 12.0, 5)
#     # :return:
#     # """
#     # command = ''
#     # os.system(command)

from functools import partial
def get_design_params(ligand_name, params=None):  ### ligand_name not startswith('LG') endswith('params')
    if params is None:
        params = ('6.0', '8.0', '10.0', '12.0', '5')
        return partial(get_design_params, ligand_name)(params)
    # command = ''
    command = "sed -e 's/res_ligand_params_file/design_" + ligand_name + ".params/g' -e 's/enz_score.out/enz_score_" + ligand_name + ".out/g' -e 's/-cut1 6.0/-cut1 " + params[0] + "/g' -e 's/-cut2 10.0/-cut2 " + params[1] + "/g' -e 's/-cut3 15.0/-cut3 " + params[2] + "/g' -e 's/-cut4 20.0/-cut4 " + params[3] + "/g' -e 's/-nstruct 5/-nstruct " + params[4] + "/g' design.flags > design_out.flags"
    os.system(command)


def send_email(email_addr, email_content, result_file):
    host_server = 'smtp.qq.com'
    sender_mail_addr = '[email protected]'
    pwd = 'utxfxpzcpsnzbbcc'
    receiver_mail_addr = email_addr
    mail_content = email_content
    mail_title = "JianpingLin's email"
    msg = MIMEMultipart()
    msg['Subject'] = Header(mail_title, 'utf-8')
    msg['From'] = sender_mail_addr
    msg['To'] = Header('Receiver', 'utf-8')

    msg.attach(MIMEText(mail_content, 'html', 'utf-8'))
    # att1 = MIMEText(open(result_file).read(), 'base64', 'utf-8')
    att1 = MIMEText(open(result_file).read(), 'base64')
    # import zipfile
    # att1 = MIMEText(zipfile.ZipFile(result_file), 'base64', 'utf-8')
    att1['Content-Type'] = 'application/octet-stream'
    att1['Content-Disposition'] = 'attachment; filename="match_design.tar.gz"'
    msg.attach(att1)

    smtp = SMTP_SSL(host_server)
    smtp.set_debuglevel(1)
    smtp.ehlo(host_server)
    smtp.login(sender_mail_addr, pwd)
    smtp.sendmail(sender_mail_addr, receiver_mail_addr, msg.as_string())
    smtp.quit()


@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def fourth_step(request):
    job_name = request.data['job_name']
    design_mini_range = request.data['design_mini_range']  ###
    user_email = request.data['user_email']

    # design_cst = request.data['design_cst']
    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    os.chdir(work_dir)
    for filename in os.listdir(work_dir):  ### ligand_name must be the user-submitted mol2, not LG.mol2
        if filename.endswith('params') and not filename.startswith('LG'):
            ligand_name = filename.split('.params')[0]
            break
    match_design_dir = os.path.join(work_dir, 'match_design')
    if not os.path.exists(match_design_dir):
        os.mkdir(match_design_dir)

    # if design_cst != '':
    #     cst_str = design_cst.read()
    #     user_design_cst_file = os.path.join(work_dir, 'match_design', 'design.cst')
    #     prep_dock.save_to_file(user_design_cst_file, cst_str)
    # else:
    #
    try:
        design_cst = request.data['design_cst']
        cst_str = design_cst.read()
        user_design_cst_file = os.path.join(work_dir, 'match_design', 'design.cst')
        prep_dock.save_to_file(user_design_cst_file, cst_str)
    except MultiValueDictKeyError:
        os.system('cp ./inputs/match.cst ./match_design/design.cst')
    finally:
        os.system('mv UM*match*.pdb ./match_design')
    os.system('cp ../../rosetta_workflow_all_scripts/design.flags ./')
    ###To DO###
    # command = "sed -e 's/res_ligand_params_file/design_" + ligand_name + ".params/g' -e 's/enz_score.out/enz_score_" + ligand_name + ".out/g' design.flags > design_out.flags"
    # get_design_params(ligand_name, tuple(design_mini_range.split(';')))
    ####TO DO###
    # os.system(command)

    if design_mini_range != '':
        # design_mini_range = request.data['design_mini_range']
        tpl_mini_range = tuple(design_mini_range.split(';'))
        if len(tpl_mini_range) != 5:
            return Response('Please check that the "Designable Range, Repackable Range and Number of Outputs" exists.')
        else:
            get_design_params(ligand_name, tpl_mini_range)
    else:
        get_design_params(ligand_name)

    os.system("sed -r '/^PDB_ROTAMERS/d' " + ligand_name + ".params > match_design/design_" + ligand_name + ".params")
    os.system('cp design_out.flags ./match_design')
    match_dir = os.path.join(work_dir, 'match_design')
    os.chdir(match_dir)
    match_file_list = [filename for filename in os.listdir(match_dir) if filename.startswith('UM')]

    # design_comand(match_file_list[0])
    ###Post user###
    # pool = mul.Pool(5)
    # pool.map(design_comand, match_file_list)
    # pool.close()
    # pool.join()
    design_analysis.design_score(ligand_name, './')
    params = SubmitParamter.objects.get(job_name=job_name)
    params.user_email = user_email
    design_ligandname_out = 'design_' + ligand_name.split('.')[0] + '.out'

    file = File(open(design_ligandname_out))
    params.design_ligand_name_out.save(design_ligandname_out, file)
    params.save()

    # os.chdir(work_dir)
    # os.system('zip -r match_design.zip match_design')
    # os.system('tar czvf match_design.tar.gz UM*DE*.pdb')
    os.system('zip match_design UM*DE*.pdb ' + design_ligandname_out)

    email_content = "Welcome to Jianping Lin's group"
    match_design_file = os.path.join('./', 'match_design.zip')
    # send_email(email_addr=user_email, email_content=email_content, result_file=design_ligandname_out)
    # send_email(email_addr=user_email, email_content=email_content, result_file=match_design_file)
    # send_file_zipped(design_ligandname_out, ['[email protected]'])
    send_file_zipped(match_design_file, user_email, email_content=email_content)
    serializer = SubmitParamsSerializer(params)
    return JsonResponse(serializer.data, safe=False)
    # return Response('Successfully, this process needs ')


def get_analysis_params_dic(params):
    dic = {}
    temp_list = params.split(',')
    for param in temp_list:
        name, value = param.split(':')
        dic[name] = value
    return dic


@api_view(['POST'])
@permission_classes([permissions.AllowAny])
def fifth_step(request):
    job_name = request.data['job_name']
    analysis_params = request.data['analysis_params']  ### all_cst value < 0.9\nSR_2_interf_E_1_5:-9,
    # analysis_dict = get_analysis_params_dic(analysis_params)  ### {all_cst:0.9, SR_2_interf_E_1_5:-9}

    job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')
    work_dir = os.path.join(job_dir, job_name)
    os.chdir(work_dir)
    for filename in os.listdir(work_dir):
        if filename.endswith('params') and not filename.startswith('LG'):
            ligand_name = filename.split('.params')[0]
            break
    match_dir = os.path.join(work_dir, 'match_design')
    os.chdir(match_dir)
    design_analysis.design_filter(ligand_name, analysis_params.strip())
    # design_analysis.design_score(ligand_name, './')
    analysis_command = 'perl /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/src/apps/public/enzdes/DesignSelect.pl -d ' + 'design_' + ligand_name + '.out' + ' -c ' + 'design_' + ligand_name + '.filter' + ' -tag_column last > filtered_designs_' + ligand_name + '.out'
    print analysis_command
    os.system(analysis_command)
    # serializer = SubmitParamsSerializer(params)
    # return JsonResponse(serializer.data, safe=False)
    return Response('Successfully')


class SubmitParamsViewSet(viewsets.DynamicModelViewSet):
    queryset = SubmitParamter.objects.all()
    serializer_class = serializers.SubmitParamsSerializer


class OnlinedockViewSet(viewsets.DynamicModelViewSet):
    queryset = Onlinedock.objects.all()
    serializer_class = serializers.OnlinedockSerializer
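The short snippet below is an added illustration and is not part of the original views.py. It shows how the plain delimiter-separated strings posted by the client are parsed, using example values that already appear in the code comments above (the all_cst / SR_2_interf_E_1_5 filters for fifth_step, and the chain:residue:atom constraint string for second_step); the concrete numbers are illustrative only.

# Added illustration (values taken from the comments above, not from a real job).
analysis_params = 'all_cst:0.9,SR_2_interf_E_1_5:-9'
print(get_analysis_params_dic(analysis_params))
# {'all_cst': '0.9', 'SR_2_interf_E_1_5': '-9'}

constrain_info = 'A:216:PRP:O2B:PB:O3A:A:131:ASP:OD2:CG:CB-type:OH'
print([cst.split('-') for cst in constrain_info.split(',') if cst != ''])
# [['A:216:PRP:O2B:PB:O3A:A:131:ASP:OD2:CG:CB', 'type:OH']]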
normal
{ "blob_id": "8c6b7f29b8dca61a5218b51c85149c9642af5649", "index": 6665, "step-1": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom dynamic_rest import viewsets\nfrom django.shortcuts import render\nimport os\nfrom rest_framework import permissions\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.decorators import api_view, permission_classes, detail_route, list_route\nfrom rest_framework import mixins\nfrom rdkit import Chem\nimport random\nfrom Bio.PDB.PDBParser import PDBParser\nimport time\nfrom Bio.PDB.PDBIO import PDBIO\nimport residues_scanning_command\nimport prep_dock\nimport threading\nfrom rosetta_workflow_all_scripts import rosetta_protein_prep, get_cst_file, change_pos, design_analysis\nimport rosetta_workflow_all_scripts\nfrom django.utils.datastructures import MultiValueDictKeyError\nimport multiprocessing as mul\nimport time\nfrom models import SubmitParamter, Onlinedock\nfrom django.core.files import File\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom smtplib import SMTP_SSL\nfrom email.mime.multipart import MIMEMultipart\nfrom email import encoders\nfrom email.message import Message\nfrom email.mime.base import MIMEBase\nfrom dynamic_rest import viewsets\nimport serializers\nfrom rest_framework.parsers import JSONParser\nfrom polls.serializers import SubmitParamsSerializer, OnlinedockSerializer\nfrom django.http import JsonResponse\n\nimport zipfile\nimport tempfile\ndef send_file_zipped(the_file, recipients, email_content, sender='[email protected]'):\n zf = tempfile.TemporaryFile(prefix='mail', suffix='zip')\n zip = zipfile.ZipFile(zf, 'w')\n zip.write(the_file)\n zip.close()\n zf.seek(0)\n\n ### Create the message\n themsg = MIMEMultipart()\n themsg['Subject'] = 'File %s' % the_file\n themsg['To'] = ', '.join(recipients)\n themsg['From'] = sender\n themsg.attach(MIMEText(email_content, 'html', 'utf-8'))\n themsg.preamble = 'I am not using a MIME-aware mail reader.\\n'\n msg = MIMEBase('application', 'zip')\n msg.set_payload(zf.read())\n encoders.encode_base64(msg)\n msg.add_header('Content-Disposition', 'attachment', filename=the_file)\n themsg.attach(msg)\n themsg = themsg.as_string()\n\n ### Send the message\n import smtplib\n host_server = 'smtp.qq.com'\n sender_mail_addr = '[email protected]'\n pwd = 'utxfxpzcpsnzbbcc'\n\n smtp = SMTP_SSL(host_server)\n smtp.set_debuglevel(1)\n smtp.ehlo(host_server)\n smtp.login(sender_mail_addr, pwd)\n smtp.sendmail(sender, recipients, themsg)\n smtp.quit()\n\n # smtp = smtplib.SMTP()\n # smtp.connect()\n # smtp.sendmail(sender, recipients, themsg)\n # smtp.close()\ndef get_pov_value(file):\n f = open(file)\n lines = f.readlines()\n f.close()\n for line in lines:\n if line.startswith('1'):\n value = float(line.split('|')[1].strip())\n return value\n\ndef main(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr):\n \"\"\"\n :param job_name:\n :param mutation_radius:\n :param pov_radius:\n :param pH:\n :param mutation_info_list:\n :param protein:\n :param ligand_name:\n :param ligand_resseq:\n :param chain_id:\n :return:\n \"\"\"\n current_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n print current_time\n log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')\n wild_protein_name = protein.name\n # job_dir = os.path.join(log_dir, job_name + '_' + current_time)\n job_dir = os.path.join(log_dir, job_name)\n if not 
os.path.exists(job_dir):\n os.mkdir(job_dir)\n wild_protein_file = os.path.join(job_dir, wild_protein_name)\n protein_str = protein.read()\n prep_dock.save_to_file(wild_protein_file, protein_str)\n\n prepare_protein_name = wild_protein_name.split('.')[0] + '_prep.pdb'\n\n ### Prepare protein\n prep_dock.prep_protein(wild_protein_name, prepare_protein_name, job_dir, pH)\n\n ### make mutation\n prep_dock.get_mut_protein(job_dir, job_name, mut_info_list=mutation_info_list, mutation_radius=mutation_radius,\n prepare_protein_name=prepare_protein_name)\n\n prepare_protein = os.path.join(job_dir, prepare_protein_name)\n mutation_protein_name = job_name + '_mut-2.pdb'\n mutation_protein = os.path.join(job_dir, mutation_protein_name)\n ### prep_pov\n prep_dock.get_pro_lig_povin((prepare_protein_name, prepare_protein, chain_id, ligand_resseq, ligand_name), pov_radius, protein_type='prep')\n prep_pov = os.path.join(job_dir, 'pov', 'prep', 'prep.log')\n ### mut_pov\n prep_dock.get_pro_lig_povin((mutation_protein_name, mutation_protein, chain_id, ligand_resseq, ligand_name), pov_radius, protein_type='mut')\n mut_pov = os.path.join(job_dir, 'pov', 'mut', 'mut.log')\n ### plip\n # prep_dock.get_plip_file(prepare_protein, mutation_protein)\n ### TMalign\n # prep_dock.TMalign(prepare_protein, mutation_protein)\n\n onlinedock, create = Onlinedock.objects.get_or_create(job_name=job_name)\n prep_protein_file = File(open(prepare_protein))\n mut_protein_file = File(open(mutation_protein))\n prep_pov_file = File(open(prep_pov))\n mut_pov_file = File(open(mut_pov))\n\n prep_pov_value = get_pov_value(prep_pov)\n mut_pov_value = get_pov_value(mut_pov)\n\n\n\n\n onlinedock.prep_protein.save(prepare_protein_name, prep_protein_file)\n onlinedock.mut_protein.save(mutation_protein_name, mut_protein_file)\n onlinedock.prep_pov.save('prep.log', prep_pov_file)\n onlinedock.mut_pov.save('mut.log', mut_pov_file)\n onlinedock.prep_pov_value = prep_pov_value\n onlinedock.mut_pov_value = mut_pov_value\n onlinedock.save()\n\n os.chdir(job_dir)\n os.system('zip related_info ' + prepare_protein + ' ' + mutation_protein + ' ' + prep_pov + ' ' + mut_pov)\n email_content = \"Wellcome to Jianping Lin Group server~~\"\n print(os.path.curdir)\n related_info = os.path.join(os.path.curdir, 'related_info.zip')\n send_file_zipped(related_info, email_addr, email_content=email_content)\n\ndef test():\n job_dir = '/home/jianping/django_test/longge/polls/log/1111/pov'\n\n\n# job_id = 0\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef online_docking(request):\n # current_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n # print current_time\n # log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')\n\n job_name = request.data['job_name']\n mutation_radius = request.data['mutation_radius'] ### mutation radius\n pov_radius = str(request.data['pov_radius']) ### povelty radius\n pH = request.data['pH']\n mutation_info_list = request.data['mutation_info_list'] ### [chain, position, residue, ]\n protein = request.data['protein_file']\n ligand_name = request.data['ligand_name']\n ligand_resseq = int(request.data['ligand_resseq'])\n chain_id = request.data['chain_id']\n email_addr = request.data['email_addr']\n\n # main(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, email_addr)\n t = threading.Thread(target=main, args=(job_name, mutation_radius, pov_radius, pH, mutation_info_list, protein, ligand_name, ligand_resseq, chain_id, 
email_addr))\n t.setDaemon(False)\n t.start()\n return Response('Conguratulations, you have submitted successfully!!!')\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef prepare_protein(request):\n job_name = request.data['job_name']\n protein = request.data['protein']\n job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')\n work_dir = os.path.join(job_dir, job_name)\n if not os.path.exists(work_dir):\n os.mkdir(work_dir)\n protein_name, protein_name_pure = protein.name, protein.name.split('.')[0]\n local_protein_file = os.path.join(work_dir, protein_name)\n protein_str = protein.read()\n prep_dock.save_to_file(local_protein_file, protein_str)\n\n os.chdir(work_dir)\n protein_renumber_name, protein_renumber = protein_name_pure + '_renumber', protein_name_pure + '_renumber.pdb'\n os.system(\n 'python ../../rosetta_workflow_all_scripts/PDB_renumber.py -i ' + protein_name + ' -a -r > ' + protein_renumber_name + '.pdb')\n params, create = SubmitParamter.objects.get_or_create(job_name=job_name)\n prt = File(open(local_protein_file))\n prt_renumber = File(open(protein_renumber))\n\n params.protein_file.save(protein_name, prt)\n params.protein_renumber_file.save(protein_renumber, prt_renumber)\n params.save()\n # return Response(params)\n serializer = SubmitParamsSerializer(params)\n return JsonResponse(serializer.data, safe=False)\n # return Response('Successfully')\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef first_step(request):\n job_name = request.data['job_name']\n # protein = request.data['protein']\n ligand = request.data['ligand']\n other_ligands = request.data['other_ligands'] ### ['A','215','MG','A','218','HOH','A','217','ATP']\n other_ligands = other_ligands.split('[')[1].split(']')[0].split(',')\n other_ligands = [str(i) for i in other_ligands]\n res_chain = request.data['res_chain'] # 'A'\n res_ligand_chain = request.data['res_ligand_chain'] ## 'A'\n res_ligand_ID = request.data['res_ligand_ID'] ### '216'\n res_ligand_name = request.data['res_ligand_name'] ### 'PRP'\n # design_ligand_name = request.data['design_ligand_name'] ### 'ABC'\n\n ### third step ###\n # CST_A_chain_name = request.data['CST_A_chain_name']\n\n current_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n print current_time\n job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')\n\n work_dir = os.path.join(job_dir, job_name)\n if not os.path.exists(work_dir):\n os.mkdir(work_dir)\n\n # protein_name, protein_name_pure = protein.name, protein.name.split('.')[0]\n # local_protein_file = os.path.join(work_dir, protein_name)\n # protein_str = protein.read()\n # prep_dock.save_to_file(local_protein_file, protein_str)\n\n ligand_name, ligand_name_pure = ligand.name, ligand.name.split('.')[0]\n local_ligand_file = os.path.join(work_dir, ligand_name)\n ligand_str = ligand.read()\n prep_dock.save_to_file(local_ligand_file, ligand_str)\n os.chdir(work_dir)\n # protein_renumber_name, protein_renumber = protein_name_pure + '_renumber', protein_name_pure + '_renumber.pdb'\n # os.system('python ../../rosetta_workflow_all_scripts/PDB_renumber.py -i ' + protein_name + ' -a -r > ' + protein_renumber_name + '.pdb')\n os.system('python ../../rosetta_workflow_all_scripts/design_ligand_prep.py ' + ligand_name)\n while True:\n if os.path.exists(ligand_name_pure+'.params'):\n break\n os.system('cp ../../rosetta_workflow_all_scripts/match.flags ./')\n os.system('cp ../../rosetta_workflow_all_scripts/match_grid.flags 
./')\n\n for filename in os.listdir(work_dir):\n if filename.endswith('renumber.pdb'):\n protein_renumber_name = filename.split('.pdb')[0]\n protein_renumber = filename\n break\n\n prep_pdb, prep_pdb_pure = protein_renumber_name + '_prep.pdb', protein_renumber_name + '_prep'\n\n rosetta_protein_prep.prep_protein(protein_renumber, prep_pdb, res_chain, './')\n rosetta_protein_prep.get_ligand(prep_pdb, res_ligand_chain, res_ligand_ID, res_ligand_name)\n\n ### my code ###\n step = 3\n other_ligands_class_list = [other_ligands[i: i+step] for i in range(0, len(other_ligands), step)]\n os.system('cp ' + protein_renumber_name + '_chain' + res_chain + '.pdb combi_ligands.pdb')\n\n if len(other_ligands) < 3:\n print 'There are no ligands that need to be retained'\n # os.system('cp ' + protein_renumber_name + '_chain' + res_chain + '.pdb combi_ligands.pdb')\n else:\n i = 0\n for cls in other_ligands_class_list:\n combi_name = '_'.join(cls)\n print combi_name\n rosetta_protein_prep.get_ligand(protein_renumber, cls[0], cls[1], cls[2])\n last_out_name = protein_renumber_name + '_chain' + combi_name + '.pdb'\n last_out_name_mol2 = protein_renumber_name + '_chain' + combi_name + '.mol2'\n rosetta_protein_prep.combi_pdb('combi_ligands.pdb', last_out_name)\n\n if cls[2] != 'HOH' and len(cls[2]) == 3:\n i += 1\n os.system('obabel -ipdb ' + last_out_name + ' -omol2 -O ' + last_out_name_mol2)\n os.system('python /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/scripts/python/public/molfile_to_params.py ' + last_out_name_mol2 + '-n LG' + str(i))\n\n os.system(\"sed -i '/^TER/c'TER'' combi_ligands.pdb\")\n rosetta_protein_prep.get_grid('../../rosetta_workflow_all_scripts/match_grid.flags', prep_pdb_pure, res_chain, res_ligand_chain, res_ligand_ID, res_ligand_name)\n rosetta_protein_prep.get_match_flags('../../rosetta_workflow_all_scripts/match.flags', res_chain, 'ABC', prep_pdb_pure, ligand_name_pure)\n os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/gen_lig_grids.linuxgccrelease -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database @match_grid_out.flags')\n os.system('cp ' + protein_renumber + ' ./renumber.pdb')\n\n\n ### update database ###\n # params, created = SubmitParamter.objects.get_or_create(\n # job_name=job_name,\n # other_ligands=other_ligands,\n # res_chain=res_chain,\n # res_ligand_chain=res_ligand_chain,\n # res_ligand_name=res_ligand_name\n # )\n params = SubmitParamter.objects.get(job_name=job_name)\n params.other_ligands = other_ligands\n params.res_chain = res_chain\n params.res_ligand_chain = res_ligand_chain\n params.res_ligand_name = res_ligand_name\n\n # prt = File(open(local_protein_file))\n lgd = File(open(local_ligand_file))\n # prt_renumber = File(open(protein_renumber))\n ligand_params_file = File(open(ligand_name_pure+'.params'))\n pos_file = os.path.join('./inputs', prep_pdb_pure+'_chain'+res_chain+'.pdb_0.pos')\n pos_file_name = prep_pdb_pure+'_chain'+res_chain+'.pdb_0.pos'\n inputs_pos_file = File(open(pos_file))\n\n # params.protein_file.save(protein_name, prt)\n params.ligand_file.save(ligand_name, lgd)\n # params.protein_renumber_file.save(protein_renumber, prt_renumber)\n params.ligand_params_file.save(ligand_name_pure+'.params', ligand_params_file)\n params.inputs_pos_file.save(pos_file_name, inputs_pos_file)\n\n params.save()\n serializer = SubmitParamsSerializer(params)\n return JsonResponse(serializer.data, safe=False)\n # return 
Response('Successful')\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef second_step(request):\n job_name = request.data['job_name']\n constrain_info = request.data['constrain_info'] ### A:216:PRP:O2B:PB:O3A:A:131:ASP:OD2:CG:CB-O2B:PB:O3A-0.20:10.0:10.0:10.0:10.0:10.0-100.0:60.0:60.0:60.0:60.0:60.0-0:360.0:360.0:360.0:360.0:360.0-1:1:1:1:1:1, or A:216:PRP:O2B:PB:O3A:A:131:ASP:OD2:CG:CB-type:OH\n cat_ID = request.data['cat_ID']\n # cst1 = request.data['cst1']\n # cst2 = request.data['cst2']\n # cst3 = request.data['cst3']\n # three_atoms = request.data['three_atoms'] ### O2B:PB:O3A, type:OH\n # CST_A_chain_name = request.data['CST_A_chain_name'] ### 'A'\n # CST_A_residue_ID = int(request.data['CST_A_residue_ID']) ### '216'\n # CST_A_residue_name = request.data['CST_A_residue_name'] ### 'PRP'\n # Atom_A1 = request.data['Atom_A1'] ### 'O2B'\n # Atom_A2 = request.data['Atom_A2'] ### 'PB'\n # Atom_A3 = request.data['Atom_A3'] ### 'O3A'\n # CST_B_chain_name = request.data['CST_B_chain_name'] ### 'A'\n # CST_B_residue_ID = int(request.data['CST_B_residue_ID']) ### '131'\n # CST_B_residue_name = request.data['CST_B_residue_name'] ### 'ASP'\n # Atom_B1 = request.data['Atom_B1'] ### 'OD2'\n # Atom_B2 = request.data['Atom_B2'] ### 'CG'\n # Atom_B3 = request.data['Atom_B3'] ### 'CB'\n renumber_pdb = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design', job_name, 'renumber.pdb')\n work_dir = os.path.dirname(renumber_pdb)\n os.chdir(work_dir)\n\n ### my code ###\n #_______________________________________________________________\n constrain_info_list = [cst.split('-') for cst in constrain_info.split(',') if cst is not '']\n # for constrain_info in constrain_info_list:\n # if len(constrain_info) == 2:\n\n parse = PDBParser(PERMISSIVE=1)\n structure = parse.get_structure('renumber.pdb', renumber_pdb)\n w = open('match.cst', 'w')\n w.write('# cst constraint descriptior for renumber.pdb' + '\\n\\n\\n')\n w.write('# NOTE\\n\\n\\n')\n\n for idx, cst_info in enumerate(constrain_info_list):\n cst_result = get_cst_file.measure_dist_angle_dihe_new(structure, idx, cst_info)\n w.writelines(cst_result)\n w.close()\n\n # get_cst_file.measure_dist_angle_dihe(renumber_pdb, 'renumber.pdb', constrain_info_list, 'match.cst')\n # ____________________________________________________________\n\n # get_cst_file.measure_dist_angle_dihe(renumber_pdb, 'renumber.pdb', [(CST_A_chain_name, CST_A_residue_ID, CST_A_residue_name,\n # Atom_A1, Atom_A2, Atom_A3, CST_B_chain_name,\n # CST_B_residue_ID, CST_B_residue_name, Atom_B1,\n # Atom_B2, Atom_B3), ], 'match.cst')\n os.system('cp match.cst ./inputs')\n\n inputs_dir = os.path.join(work_dir, 'inputs')\n os.chdir(inputs_dir)\n for filename in os.listdir(inputs_dir):\n if filename.endswith('_0.pos'):\n pos = os.path.join(inputs_dir, filename)\n os.system('cp ' + pos + ' ./pos.bk')\n change_pos.change_pos(filename, cat_ID)\n params = SubmitParamter.objects.get(job_name=job_name)\n params.constrain_info = constrain_info\n params.cat_ID = cat_ID\n match_cst_file = File(open('match.cst'))\n params.match_cst_file.save('match.cst', match_cst_file)\n params.save()\n # for filename in os.listdir(inputs_dir):\n # if filename.endswith('.params'):\n # os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/CstfileToTheozymePDB.linuxgccrelease -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database -extra_res_fa ' + filename + ' -match:geometric_constraint_file match.cst')\n return 
Response('Successful')\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef third_step(request):\n job_name = request.data['job_name']\n # user_specialized_cst_file = request.data['user_specialized_cst_file']\n job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')\n work_dir = os.path.join(job_dir, job_name)\n os.chdir(work_dir)\n # if user_specialized_cst_file:\n # cst_str = user_specialized_cst_file.read()\n # user_defined_cst_file = os.path.join(work_dir, 'inputs', 'match.cst')\n # prep_dock.save_to_file(user_defined_cst_file, cst_str)\n try:\n cst_file = request.data['cst_file']\n cst_str = cst_file.read()\n user_defined_cst_file = os.path.join(work_dir, 'inputs', 'match.cst')\n prep_dock.save_to_file(user_defined_cst_file, cst_str)\n params = SubmitParamter.objects.get(job_name=job_name)\n new_cst_file = File(open(user_defined_cst_file))\n params.user_defined_cst_file.save('match.cst', new_cst_file)\n params.save()\n except MultiValueDictKeyError:\n pass\n try:\n os.system('/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/match.linuxgccrelease @match_out.flags -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database')\n # params = SubmitParamter.objects.get(job_name=job_name)\n # UM_pdb_list = []\n # for filename in os.listdir(os.path.join(work_dir, 'inputs')):\n # if filename.startswith('UM'):\n # file = os.path.join(work_dir, 'inputs', filename)\n # UM_pdb = File(open(file))\n UM_pdb_list = [filename for filename in os.listdir(os.path.join(work_dir, 'inputs')) if filename.startswith('UM')]\n params.UM_pdb_count = len(UM_pdb_list)\n params.save()\n # return Response('Successful, there are {} UM***.pdb'.format(len(UM_pdb_list)))\n serializer = SubmitParamsSerializer(params)\n return JsonResponse(serializer.data, safe=False)\n except:\n return Response('Failed, please check the constraint file and submit again !!!')\n\nfrom functools import wraps\ndef timethis(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n print(func.__name__, end-start)\n return result\n return wrapper\n\n@timethis\ndef design_comand(match_file):\n command = \"/home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/bin/enzyme_design.linuxgccrelease @design_out.flags -database /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/database -s \"+ match_file + \" -out:file:o \" + match_file + \"_DE.out > \" + match_file + \"_design.log\"\n os.system(command)\n\n# def get_design_params(ligand_name, params=('6.0', '8.0', '10.0', '12.0', '5')):\n# # \"\"\"\n# # :param ligand_name:\n# # :param params: (6.0, 8.0, 10, 12.0, 5)\n# # :return:\n# # \"\"\"\n# # command = ''\n# # os.system(command)\n\nfrom functools import partial\ndef get_design_params(ligand_name, params=None): ### ligand_name not startswith('LG') endswith('params')\n if params is None:\n params = ('6.0', '8.0', '10.0', '12.0', '5')\n return partial(get_design_params, ligand_name)(params)\n # command = ''\n command = \"sed -e 's/res_ligand_params_file/design_\" + ligand_name + \".params/g' -e 's/enz_score.out/enz_score_\" + ligand_name + \".out/g' -e 's/-cut1 6.0/-cut1 \" + params[0] + \"/g' -e 's/-cut2 10.0/-cut2 \" + params[1] + \"/g' -e 's/-cut3 15.0/-cut3 \" + params[2] + \"/g' -e 's/-cut4 20.0/-cut4 \" + params[3] + \"/g' -e 's/-nstruct 5/-nstruct \" + params[4] + \"/g' design.flags > design_out.flags\"\n os.system(command)\n\ndef 
send_email(email_addr, email_content, result_file):\n host_server = 'smtp.qq.com'\n sender_mail_addr = '[email protected]'\n pwd = 'utxfxpzcpsnzbbcc'\n receiver_mail_addr = email_addr\n mail_content = email_content\n mail_title = \"JianpingLin's email\"\n msg = MIMEMultipart()\n msg['Subject'] = Header(mail_title, 'utf-8')\n msg['From'] = sender_mail_addr\n msg['To'] = Header('Receiver', 'utf-8')\n\n msg.attach(MIMEText(mail_content, 'html', 'utf-8'))\n # att1 = MIMEText(open(result_file).read(), 'base64', 'utf-8')\n att1 = MIMEText(open(result_file).read(), 'base64')\n # import zipfile\n # att1 = MIMEText(zipfile.ZipFile(result_file), 'base64', 'utf-8')\n att1['Content-Type'] = 'application/octet-stream'\n att1['Content-Disposition'] = 'attachment; filename=\"match_design.tar.gz\"'\n msg.attach(att1)\n\n smtp = SMTP_SSL(host_server)\n smtp.set_debuglevel(1)\n smtp.ehlo(host_server)\n smtp.login(sender_mail_addr, pwd)\n smtp.sendmail(sender_mail_addr, receiver_mail_addr, msg.as_string())\n smtp.quit()\n\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef fourth_step(request):\n job_name = request.data['job_name']\n design_mini_range = request.data['design_mini_range']###\n user_email = request.data['user_email']\n\n # design_cst = request.data['design_cst']\n job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')\n work_dir = os.path.join(job_dir, job_name)\n os.chdir(work_dir)\n for filename in os.listdir(work_dir): ### ligand_name 必须是提交的mol2,不应该是LG.mol2\n if filename.endswith('params') and not filename.startswith('LG'):\n ligand_name = filename.split('.params')[0]\n break\n match_design_dir = os.path.join(work_dir, 'match_design')\n if not os.path.exists(match_design_dir):\n os.mkdir(match_design_dir)\n\n # if design_cst != '':\n # cst_str = design_cst.read()\n # user_design_cst_file = os.path.join(work_dir, 'match_design', 'design.cst')\n # prep_dock.save_to_file(user_design_cst_file, cst_str)\n # else:\n #\n try:\n design_cst = request.data['design_cst']\n cst_str = design_cst.read()\n user_design_cst_file = os.path.join(work_dir, 'match_design', 'design.cst')\n prep_dock.save_to_file(user_design_cst_file, cst_str)\n except MultiValueDictKeyError:\n os.system('cp ./inputs/match.cst ./match_design/design.cst')\n finally:\n os.system('mv UM*match*.pdb ./match_design')\n os.system('cp ../../rosetta_workflow_all_scripts/design.flags ./')\n ###To DO###\n # command = \"sed -e 's/res_ligand_params_file/design_\" + ligand_name + \".params/g' -e 's/enz_score.out/enz_score_\" + ligand_name + \".out/g' design.flags > design_out.flags\"\n # get_design_params(ligand_name, tuple(design_mini_range.split(';')))\n ####TO DO###\n # os.system(command)\n\n if design_mini_range != '':\n #design_mini_range = req0uest.data['design_mini_range']\n tpl_mini_range = tuple(design_mini_range.split(';'))\n if len(tpl_mini_range) != 5:\n return Response('Please check that the \"Designable Range, Repackable Range and Number of Outputs\" exists.')\n else:\n get_design_params(ligand_name, tpl_mini_range)\n else:\n get_design_params(ligand_name)\n\n os.system(\"sed -r '/^PDB_ROTAMERS/d' \" + ligand_name + \".params > match_design/design_\" + ligand_name + \".params\")\n os.system('cp design_out.flags ./match_design')\n match_dir = os.path.join(work_dir, 'match_design')\n os.chdir(match_dir)\n match_file_list = [filename for filename in os.listdir(match_dir) if filename.startswith('UM')]\n\n # design_comand(match_file_list[0])\n ###Post user###\n # pool = mul.Pool(5)\n # 
pool.map(design_comand, match_file_list)\n # pool.close()\n # pool.join()\n design_analysis.design_score(ligand_name, './')\n params = SubmitParamter.objects.get(job_name=job_name)\n params.user_email = user_email\n design_ligandname_out = 'design_' + ligand_name.split('.')[0] + '.out'\n\n file = File(open(design_ligandname_out))\n params.design_ligand_name_out.save(design_ligandname_out, file)\n params.save()\n\n # os.chdir(work_dir)\n # os.system('zip -r match_design.zip match_design')\n # os.system('tar czvf match_design.tar.gz UM*DE*.pdb')\n os.system('zip match_design UM*DE*.pdb ' + design_ligandname_out)\n\n email_content = \"Welcome to Jianping Lin's group\"\n match_design_file = os.path.join('./', 'match_design.zip')\n # send_email(email_addr=user_email, email_content=email_content, result_file=design_ligandname_out)\n # send_email(email_addr=user_email, email_content=email_content, result_file=match_design_file)\n # send_file_zipped(design_ligandname_out, ['[email protected]'])\n send_file_zipped(match_design_file, user_email, email_content=email_content)\n serializer = SubmitParamsSerializer(params)\n return JsonResponse(serializer.data, safe=False)\n # return Response('Successfully, this process needs ')\n\ndef get_analysis_params_dic(params):\n dic = {}\n temp_list = params.split(',')\n for param in temp_list:\n name, value = param.split(':')\n dic[name] = value\n return dic\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef fifth_step(request):\n job_name = request.data['job_name']\n analysis_params = request.data['analysis_params'] ### all_cst value < 0.9\\nSR_2_interf_E_1_5:-9,\n # analysis_dict = get_analysis_params_dic(analysis_params) ### {all_cst:0.9, SR_2_interf_E_1_5:-9}\n\n job_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Enzyme_design')\n work_dir = os.path.join(job_dir, job_name)\n os.chdir(work_dir)\n for filename in os.listdir(work_dir):\n if filename.endswith('params') and not filename.startswith('LG'):\n ligand_name = filename.split('.params')[0]\n break\n match_dir = os.path.join(work_dir, 'match_design')\n os.chdir(match_dir)\n design_analysis.design_filter(ligand_name, analysis_params.strip())\n # design_analysis.design_score(ligand_name, './')\n analysis_command = 'perl /home/jianping/Programs/rosetta_bin_linux_2018.09.60072_bundle/main/source/src/apps/public/enzdes/DesignSelect.pl -d ' + 'design_'+ligand_name+'.out' + ' -c ' + 'design_'+ligand_name+'.filter' + ' -tag_column last > filtered_designs_' + ligand_name +'.out'\n print analysis_command\n os.system(analysis_command)\n # serializer = SubmitParamsSerializer(params)\n # return JsonResponse(serializer.data, safe=False)\n return Response('Successfully')\n\nclass SubmitParamsViewSet(viewsets.DynamicModelViewSet):\n queryset = SubmitParamter.objects.all()\n serializer_class = serializers.SubmitParamsSerializer\n\nclass OnlinedockViewSet(viewsets.DynamicModelViewSet):\n queryset = Onlinedock.objects.all()\n serializer_class = serializers.OnlinedockSerializer\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import world
import items


class Quest:
    def __init__(self):
        raise NotImplementedError("Do not create raw quest classes")

    def __str__(self):
        return self.quest_name

    def give_reward(self, player):
        print("You receive: \n{} gold\n{} exp".format(self.reward_gold, self.reward_exp))
        for item in self.reward_item:
            print(item)
        player.gold += self.reward_gold
        player.exp += self.reward_exp
        for item in self.reward_item:
            player.item_inventory.append(item)
        self.complete = True


class NoobQuest(Quest):
    def __init__(self):
        self.quest_status = 0
        self.quest_name = "Kill the Rat!"
        self.reward_gold = 250
        self.reward_exp = 500
        self.reward_item = [items.Longsword()]
        self.quest_log = []
        self.complete = False

    def print_quest_log(self):
        print("Quest: {}".format(self.quest_name))
        for n, q in enumerate(self.quest_log, 1):
            print("{}: {}".format(n, q))

    def update_quest_log(self, quest_log_text):
        self.quest_log.append(quest_log_text)


''' #### Working on new quest architecture ####
class QuestObject:
    def __init__(self):
        self.quest_status = 0
        self.complete_status = 0
        self.quest_name = "Quest Name"
        self.reward_gold = 0
        self.reward_exp = 0
        self.reward_item = []
        self.quest_logs = []
        self.player_log = []
        self.complete = False

    def __str__(self):
        return self.quest_name

    def give_reward(self, player):
        if self.complete:
            print("You tried to get rewards twice! Something broke!")
            return
        print("You completed the quest: {}".format(self.quest_name))
        print("Here is your reward:")
        for item in self.reward_item:
            print("* {}".format())
            player.item_inventory.append(item)
        print("* {} Gold\n* {} Exp".format(self.reward_gold, self.reward_exp))
        player.gold += self.reward_gold
        player.exp += self.reward_exp
        self.complete = True

    def set_quest_status(self, status):
        self.quest_status = status

    def update_player_log(self, index):
        self.player_log.append(self.quest_logs[index])

    def can_be_completed(self):
        return self.quest_status == self.complete_status
'''
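A minimal usage sketch, added for illustration and not taken from the original file: the player object the game actually passes to give_reward is not shown here, so a small stand-in with the three attributes give_reward touches (gold, exp, item_inventory) is used instead.

# Added sketch: exercising NoobQuest with a stand-in player object.
class _StubPlayer(object):
    def __init__(self):
        self.gold = 0
        self.exp = 0
        self.item_inventory = []

quest = NoobQuest()
quest.update_quest_log("Accepted the quest in the village.")
quest.print_quest_log()                 # Quest: Kill the Rat! / 1: Accepted the quest ...

player = _StubPlayer()
quest.give_reward(player)               # prints the reward and marks the quest complete
print("{} gold, {} exp, {} item(s)".format(player.gold, player.exp, len(player.item_inventory)))
# 250 gold, 500 exp, 1 item(s)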
normal
{ "blob_id": "4d31985cf1266619406d79a7dbae269c10f21bda", "index": 5510, "step-1": "<mask token>\n\n\nclass Quest:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass NoobQuest(Quest):\n\n def __init__(self):\n self.quest_status = 0\n self.quest_name = 'Kill the Rat!'\n self.reward_gold = 250\n self.reward_exp = 500\n self.reward_item = [items.Longsword()]\n self.quest_log = []\n self.complete = False\n\n def print_quest_log(self):\n print('Quest: {}'.format(self.quest_name))\n for n, q in enumerate(self.quest_log, 1):\n print('{}: {}'.format(n, q))\n\n def update_quest_log(self, quest_log_text):\n self.quest_log.append(quest_log_text)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Quest:\n <mask token>\n <mask token>\n\n def give_reward(self, player):\n print('You receive: \\n{} gold\\n{} exp'.format(self.reward_gold,\n self.reward_exp))\n for item in self.reward_item:\n print(item)\n player.gold += self.reward_gold\n player.exp += self.reward_exp\n for item in self.reward_item:\n player.item_inventory.append(item)\n self.complete = True\n\n\nclass NoobQuest(Quest):\n\n def __init__(self):\n self.quest_status = 0\n self.quest_name = 'Kill the Rat!'\n self.reward_gold = 250\n self.reward_exp = 500\n self.reward_item = [items.Longsword()]\n self.quest_log = []\n self.complete = False\n\n def print_quest_log(self):\n print('Quest: {}'.format(self.quest_name))\n for n, q in enumerate(self.quest_log, 1):\n print('{}: {}'.format(n, q))\n\n def update_quest_log(self, quest_log_text):\n self.quest_log.append(quest_log_text)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Quest:\n\n def __init__(self):\n raise NotImplementedError('Do not create raw quest classes')\n\n def __str__(self):\n return self.quest_name\n\n def give_reward(self, player):\n print('You receive: \\n{} gold\\n{} exp'.format(self.reward_gold,\n self.reward_exp))\n for item in self.reward_item:\n print(item)\n player.gold += self.reward_gold\n player.exp += self.reward_exp\n for item in self.reward_item:\n player.item_inventory.append(item)\n self.complete = True\n\n\nclass NoobQuest(Quest):\n\n def __init__(self):\n self.quest_status = 0\n self.quest_name = 'Kill the Rat!'\n self.reward_gold = 250\n self.reward_exp = 500\n self.reward_item = [items.Longsword()]\n self.quest_log = []\n self.complete = False\n\n def print_quest_log(self):\n print('Quest: {}'.format(self.quest_name))\n for n, q in enumerate(self.quest_log, 1):\n print('{}: {}'.format(n, q))\n\n def update_quest_log(self, quest_log_text):\n self.quest_log.append(quest_log_text)\n\n\n<mask token>\n", "step-4": "import world\nimport items\n\n\nclass Quest:\n\n def __init__(self):\n raise NotImplementedError('Do not create raw quest classes')\n\n def __str__(self):\n return self.quest_name\n\n def give_reward(self, player):\n print('You receive: \\n{} gold\\n{} exp'.format(self.reward_gold,\n self.reward_exp))\n for item in self.reward_item:\n print(item)\n player.gold += self.reward_gold\n player.exp += self.reward_exp\n for item in self.reward_item:\n player.item_inventory.append(item)\n self.complete = True\n\n\nclass NoobQuest(Quest):\n\n def __init__(self):\n self.quest_status = 0\n self.quest_name = 'Kill the Rat!'\n self.reward_gold = 250\n self.reward_exp = 500\n self.reward_item = [items.Longsword()]\n self.quest_log = []\n self.complete = False\n\n def print_quest_log(self):\n print('Quest: {}'.format(self.quest_name))\n for n, q in enumerate(self.quest_log, 1):\n print('{}: {}'.format(n, q))\n\n def update_quest_log(self, 
quest_log_text):\n self.quest_log.append(quest_log_text)\n\n\n<mask token>\n", "step-5": "import world\nimport items\n\n\n\nclass Quest:\n\tdef __init__(self):\n\t\traise NotImplementedError(\"Do not create raw quest classes\")\n\t\t\n\tdef __str__(self):\n\t\treturn self.quest_name\n\t\n\tdef give_reward(self, player):\n\t\tprint(\"You receive: \\n{} gold\\n{} exp\".format(self.reward_gold, self.reward_exp))\n\t\tfor item in self.reward_item:\n\t\t\tprint(item)\n\t\tplayer.gold += self.reward_gold\n\t\tplayer.exp += self.reward_exp\n\t\tfor item in self.reward_item:\n\t\t\tplayer.item_inventory.append(item)\n\t\tself.complete = True\n\n\n\nclass NoobQuest(Quest):\n\tdef __init__(self):\n\t\tself.quest_status = 0\n\t\tself.quest_name = \"Kill the Rat!\"\n\t\tself.reward_gold = 250\n\t\tself.reward_exp = 500\n\t\tself.reward_item = [items.Longsword()]\n\t\tself.quest_log = []\n\t\tself.complete = False\n\t\n\t\n\tdef print_quest_log(self):\n\t\tprint(\"Quest: {}\".format(self.quest_name))\n\t\tfor n, q in enumerate(self.quest_log, 1):\n\t\t\tprint(\"{}: {}\".format(n, q))\n\n\tdef update_quest_log(self, quest_log_text):\n\t\tself.quest_log.append(quest_log_text)\n\n''' #### Working on new quest architecture ####\nclass QuestObject:\n\tdef __init__(self):\n\t\tself.quest_status = 0\n\t\tself.complete_status = 0\n\t\tself.quest_name = \"Quest Name\"\n\t\tself.reward_gold = 0\n\t\tself.reward_exp = 0\n\t\tself.reward_item = []\n\t\tself.quest_logs = []\n\t\tself.player_log = []\n\t\tself.complete = False\n\t\n\tdef __str__(self):\n\t\treturn self.quest_name\n\n\tdef give_reward(self, player):\n\t\tif self.complete:\n\t\t\tprint(\"You tried to get rewards twice! Something broke!\")\n\t\t\treturn\n\t\tprint(\"You completed the quest: {}\".format(self.quest_name))\n\t\tprint(\"Here is your reward:\")\n\t\tfor item in self.reward_item:\n\t\t\tprint(\"* {}\".format())\n\t\t\tplayer.item_inventory.append(item)\n\t\tprint(\"* {} Gold\\n* {} Exp\".format(self.reward_gold, self.reward_exp))\n\t\tplayer.gold += self.reward_gold\n\t\tplayer.exp += self.reward_exp\n\t\tself.complete = True\n\t\n\tdef set_quest_status(self, status):\n\t\tself.quest_status = status\n\t\n\tdef update_player_log(self, index):\n\t\tself.player_log.append(self.quest_logs[index])\n\t\n\tdef can_be_completed(self):\n\t\treturn self.quest_status == self.complete_status\n'''\n", "step-ids": [ 5, 6, 8, 9, 10 ] }
[ 5, 6, 8, 9, 10 ]
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 09:12:11 2018

@author: shen1994
"""

import codecs
import numpy as np


def create_documents():
    """ Split the training file into documents at punctuation marks or blank lines. """
    documents_length = 0
    chars, labels = [], []

    chars_file = codecs.open("data/data.data", 'w', 'utf-8')
    labels_file = codecs.open("data/label.data", 'w', 'utf-8')

    with codecs.open("data/train.data", 'r', 'utf-8') as f:
        for line in f:
            line = line.strip()

            if len(line) == 0:
                if len(chars) != 0:
                    for char in chars:
                        chars_file.write(char + "\t")
                    chars_file.write("\n")
                    for label in labels:
                        labels_file.write(label + "\t")
                    labels_file.write("\n")
                    documents_length += 1
                    chars, labels = [], []
            else:
                pieces = line.strip().split()
                chars.append(pieces[0])
                labels.append(pieces[1])

                if pieces[0] in ['。', ',', ';', '!', '?']:
                    for char in chars:
                        chars_file.write(char + "\t")
                    chars_file.write("\n")
                    for label in labels:
                        labels_file.write(label + "\t")
                    labels_file.write("\n")
                    documents_length += 1
                    chars, labels = [], []

    if len(chars) != 0:
        for char in chars:
            chars_file.write(char + "\t")
        chars_file.write("\n")
        for label in labels:
            labels_file.write(label + "\t")
        labels_file.write("\n")
        documents_length += 1
        chars, labels = [], []

    chars_file.close()
    labels_file.close()

    return documents_length


def create_useful_words(embedding_model):
    return list(embedding_model.wv.vocab.keys())


def create_lexicon(word_dict):
    """ Build the character lexicon. """
    chars = {}
    # count how often each character occurs
    with codecs.open("data/data.data", 'r', 'utf-8') as f:
        line = f.readline()
        while line:
            book_chars = line.strip().split()
            for sequence in book_chars:
                for char in sequence:
                    chars[char] = chars.get(char, 0) + 1
            line = f.readline()

    sorted_chars = sorted(chars.items(), key=lambda x: x[1], reverse=True)

    # indices start at 1; 0 is reserved for padding
    lexicon = dict([(item[0], index + 1) for index, item in enumerate(sorted_chars)])
    del sorted_chars

    # mark characters missing from the embedding vocabulary as -1 (unknown)
    for v in lexicon:
        if v not in word_dict:
            lexicon[v] = -1

    lexicon_reverse = dict(zip(lexicon.values(), lexicon.keys()))

    return lexicon, lexicon_reverse


def create_label_index():
    return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}


def create_index_label():
    return {0: 'Pad', 1: 'B', 2: 'M', 3: 'E', 4: 'S', 5: 'Unk'}


def create_embedding(embedding_model, embedding_size, lexicon_reverse):
    word_dict = create_useful_words(embedding_model)

    useful_word = []
    useful_word_length = 0
    for word in list(lexicon_reverse.values()):
        if word in word_dict:
            useful_word_length += 1
            useful_word.append(word)
    del word_dict

    # extra rows for padding (index 0) and unknown (last index)
    embedding_weights = np.zeros((useful_word_length + 2, embedding_size))
    for i in range(useful_word_length):
        embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]
    # random vector for unknown characters
    embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)

    return useful_word_length, embedding_weights


def create_matrix(lexicon, label_2_index):
    data_index = codecs.open("data/data_index.data", 'w', 'utf-8')
    label_index = codecs.open("data/label_index.data", 'w', 'utf-8')

    file_chars = codecs.open("data/data.data", 'r', 'utf-8')
    file_labels = codecs.open("data/label.data", 'r', 'utf-8')

    chars_line = file_chars.readline()
    labels_line = file_labels.readline()
    while (chars_line and labels_line):
        book_chars = chars_line.strip().split()
        book_labels = labels_line.strip().split()
        for char, label in zip(book_chars, book_labels):
            data_index.write(str(lexicon[char]) + "\t")
            label_index.write(str(label_2_index[label]) + "\t")
        data_index.write("\n")
        label_index.write("\n")

        chars_line = file_chars.readline()
        labels_line = file_labels.readline()

    file_chars.close()
    file_labels.close()
    data_index.close()
    label_index.close()


def padding_sentences(max_len):
    data_index = codecs.open("data/data_index.data", 'r', 'utf-8')
    label_index = codecs.open("data/label_index.data", 'r', 'utf-8')

    data_index_padding = codecs.open("data/data_index_padding.data", 'w', 'utf-8')
    label_index_padding = codecs.open("data/label_index_padding.data", 'w', 'utf-8')

    data_line = data_index.readline()
    while data_line:
        book_data = data_line.strip().split()
        book_data_len = len(book_data)
        new_book_data = []
        if book_data_len < max_len:
            new_book_data = ([str(0)] * (max_len - book_data_len) + book_data)
        else:
            new_book_data = book_data
        for data_word in new_book_data:
            data_index_padding.write(data_word + "\t")
        data_index_padding.write("\n")
        data_line = data_index.readline()

    label_line = label_index.readline()
    while label_line:
        book_label = label_line.strip().split()
        book_label_len = len(book_label)
        new_book_label = []
        if book_label_len < max_len:
            new_book_label = ([str(0)] * (max_len - book_label_len) + book_label)
        else:
            new_book_label = book_label
        for label_word in new_book_label:
            label_index_padding.write(label_word + "\t")
        label_index_padding.write("\n")
        label_line = label_index.readline()

    data_index.close()
    label_index.close()
    data_index_padding.close()
    label_index_padding.close()


def maxlen_2d_list():
    max_len = 0
    data_index = codecs.open("data/data_index.data", 'r', 'utf-8')
    data_line = data_index.readline()
    while data_line:
        book_data = data_line.strip().split()
        book_data_len = len(book_data)
        if book_data_len > max_len:
            max_len = book_data_len
        data_line = data_index.readline()
    data_index.close()

    return max_len
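A hypothetical driver, added to show the order in which these helpers appear intended to run; it is not part of the original file. The module name preprocess is an assumption (the original filename is not shown), and the embedding model is assumed to be a gensim 3.x-style Word2Vec, since the functions above read embedding_model.wv.vocab and embedding_model.wv[word].

# Assumed wiring of the preprocessing steps (module and model names are assumptions).
import codecs
from gensim.models import Word2Vec   # assumption: gensim 3.x style API (size=, .wv.vocab)
import preprocess                    # assumption: the file above saved as preprocess.py

preprocess.create_documents()                         # writes data/data.data and data/label.data
sentences = [line.strip().split() for line in codecs.open('data/data.data', 'r', 'utf-8')]
embedding_size = 100
embedding_model = Word2Vec(sentences, size=embedding_size, window=5, min_count=1)

word_list = preprocess.create_useful_words(embedding_model)
lexicon, lexicon_reverse = preprocess.create_lexicon(word_list)
label_2_index = preprocess.create_label_index()

preprocess.create_matrix(lexicon, label_2_index)       # writes data/data_index.data, data/label_index.data
max_len = preprocess.maxlen_2d_list()
preprocess.padding_sentences(max_len)                  # left-pads every sequence with 0 to max_len
useful_words, embedding_weights = preprocess.create_embedding(embedding_model, embedding_size, lexicon_reverse)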
normal
{ "blob_id": "f22836fc4fed22d833755db0ff34502170260766", "index": 9260, "step-1": "<mask token>\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\n<mask token>\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\n<mask token>\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\n<mask 
token>\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\ndef create_index_label():\n return {(0): 'Pad', (1): 'B', (2): 'M', (3): 'E', (4): 'S', (5): 'Unk'}\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\ndef create_matrix(lexicon, label_2_index):\n data_index = codecs.open('data/data_index.data', 'w', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'w', 'utf-8')\n file_chars = codecs.open('data/data.data', 'r', 'utf-8')\n file_labels = codecs.open('data/label.data', 'r', 'utf-8')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n while chars_line and labels_line:\n book_chars = chars_line.strip().split()\n book_labels = labels_line.strip().split()\n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + '\\t')\n label_index.write(str(label_2_index[label]) + '\\t')\n data_index.write('\\n')\n label_index.write('\\n')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n file_chars.close()\n file_labels.close()\n data_index.close()\n label_index.close()\n\n\n<mask token>\n\n\ndef maxlen_2d_list():\n max_len = 0\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n if book_data_len > max_len:\n max_len = book_data_len\n data_line = data_index.readline()\n data_index.close()\n return max_len\n", "step-3": "<mask token>\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\n<mask token>\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\ndef create_index_label():\n 
return {(0): 'Pad', (1): 'B', (2): 'M', (3): 'E', (4): 'S', (5): 'Unk'}\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\ndef create_matrix(lexicon, label_2_index):\n data_index = codecs.open('data/data_index.data', 'w', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'w', 'utf-8')\n file_chars = codecs.open('data/data.data', 'r', 'utf-8')\n file_labels = codecs.open('data/label.data', 'r', 'utf-8')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n while chars_line and labels_line:\n book_chars = chars_line.strip().split()\n book_labels = labels_line.strip().split()\n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + '\\t')\n label_index.write(str(label_2_index[label]) + '\\t')\n data_index.write('\\n')\n label_index.write('\\n')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n file_chars.close()\n file_labels.close()\n data_index.close()\n label_index.close()\n\n\ndef padding_sentences(max_len):\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'r', 'utf-8')\n data_index_padding = codecs.open('data/data_index_padding.data', 'w',\n 'utf-8')\n label_index_padding = codecs.open('data/label_index_padding.data', 'w',\n 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n new_book_data = []\n if book_data_len < max_len:\n new_book_data = [str(0)] * (max_len - book_data_len) + book_data\n else:\n new_book_data = book_data\n for data_word in new_book_data:\n data_index_padding.write(data_word + '\\t')\n data_index_padding.write('\\n')\n data_line = data_index.readline()\n label_line = label_index.readline()\n while label_line:\n book_label = label_line.strip().split()\n book_label_len = len(book_label)\n new_book_label = []\n if book_label_len < max_len:\n new_book_label = [str(0)] * (max_len - book_label_len) + book_label\n else:\n new_book_label = book_label\n for label_word in new_book_label:\n label_index_padding.write(label_word + '\\t')\n label_index_padding.write('\\n')\n label_line = label_index.readline()\n data_index.close()\n label_index.close()\n data_index_padding.close()\n label_index_padding.close()\n\n\ndef maxlen_2d_list():\n max_len = 0\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n if book_data_len > max_len:\n max_len = book_data_len\n data_line = data_index.readline()\n data_index.close()\n return max_len\n", "step-4": "<mask token>\nimport codecs\nimport numpy as np\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 
'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\ndef create_lexicon(word_dict):\n \"\"\" 生成词典 \"\"\"\n chars = {}\n with codecs.open('data/data.data', 'r', 'utf-8') as f:\n line = f.readline()\n while line:\n book_chars = line.strip().split()\n for sequence in book_chars:\n for char in sequence:\n chars[char] = chars.get(char, 0) + 1\n line = f.readline()\n sorted_chars = sorted(chars.items(), key=lambda x: x[1], reverse=True)\n lexicon = dict([(item[0], index + 1) for index, item in enumerate(\n sorted_chars)])\n del sorted_chars\n for v in lexicon:\n if v not in word_dict:\n lexicon[v] = -1\n lexicon_reverse = dict(zip(lexicon.values(), lexicon.keys()))\n return lexicon, lexicon_reverse\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\ndef create_index_label():\n return {(0): 'Pad', (1): 'B', (2): 'M', (3): 'E', (4): 'S', (5): 'Unk'}\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\ndef create_matrix(lexicon, label_2_index):\n data_index = codecs.open('data/data_index.data', 'w', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'w', 'utf-8')\n file_chars = codecs.open('data/data.data', 'r', 'utf-8')\n file_labels = codecs.open('data/label.data', 'r', 'utf-8')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n while chars_line and labels_line:\n book_chars = chars_line.strip().split()\n book_labels = labels_line.strip().split()\n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + '\\t')\n label_index.write(str(label_2_index[label]) + '\\t')\n data_index.write('\\n')\n label_index.write('\\n')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n file_chars.close()\n file_labels.close()\n data_index.close()\n label_index.close()\n\n\ndef padding_sentences(max_len):\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'r', 'utf-8')\n 
data_index_padding = codecs.open('data/data_index_padding.data', 'w',\n 'utf-8')\n label_index_padding = codecs.open('data/label_index_padding.data', 'w',\n 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n new_book_data = []\n if book_data_len < max_len:\n new_book_data = [str(0)] * (max_len - book_data_len) + book_data\n else:\n new_book_data = book_data\n for data_word in new_book_data:\n data_index_padding.write(data_word + '\\t')\n data_index_padding.write('\\n')\n data_line = data_index.readline()\n label_line = label_index.readline()\n while label_line:\n book_label = label_line.strip().split()\n book_label_len = len(book_label)\n new_book_label = []\n if book_label_len < max_len:\n new_book_label = [str(0)] * (max_len - book_label_len) + book_label\n else:\n new_book_label = book_label\n for label_word in new_book_label:\n label_index_padding.write(label_word + '\\t')\n label_index_padding.write('\\n')\n label_line = label_index.readline()\n data_index.close()\n label_index.close()\n data_index_padding.close()\n label_index_padding.close()\n\n\ndef maxlen_2d_list():\n max_len = 0\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n if book_data_len > max_len:\n max_len = book_data_len\n data_line = data_index.readline()\n data_index.close()\n return max_len\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 09:12:11 2018\n\n@author: shen1994\n\"\"\"\n\nimport codecs\nimport numpy as np\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars,labels = [],[]\n\n chars_file = codecs.open(\"data/data.data\", 'w', 'utf-8')\n labels_file = codecs.open(\"data/label.data\", 'w', 'utf-8')\n \n\n with codecs.open(\"data/train.data\", 'r', 'utf-8') as f:\n for line in f:\n\n line=line.strip()\n\t\t\t\n if len(line)==0:\n if len(chars)!=0:\n for char in chars:\n chars_file.write(char + \"\\t\")\n chars_file.write(\"\\n\")\n for label in labels:\n labels_file.write(label + \"\\t\")\n labels_file.write(\"\\n\")\n documents_length += 1\n chars, labels=[], []\n\n else:\n pieces=line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n\n if pieces[0] in ['。',',',';','!','?']:\n \n for char in chars:\n chars_file.write(char + \"\\t\")\n chars_file.write(\"\\n\")\n for label in labels:\n labels_file.write(label + \"\\t\")\n labels_file.write(\"\\n\")\n \n documents_length += 1\n chars, labels=[], []\n\n if len(chars)!=0:\n \n for char in chars:\n chars_file.write(char + \"\\t\")\n chars_file.write(\"\\n\")\n for label in labels:\n labels_file.write(label + \"\\t\")\n labels_file.write(\"\\n\")\n \n documents_length += 1\n chars, labels=[], []\n\n chars_file.close()\n labels_file.close()\n \n return documents_length\n \ndef create_useful_words(embedding_model):\n \n return list(embedding_model.wv.vocab.keys())\n \ndef create_lexicon(word_dict):\n \"\"\" 生成词典 \"\"\"\n chars = {}\n # 统计词出现的次数\n with codecs.open(\"data/data.data\", 'r', 'utf-8') as f:\n line = f.readline()\n while(line):\n \n book_chars = line.strip().split()\n for sequence in book_chars:\n for char in sequence:\n chars[char] = chars.get(char,0) + 1\n\n line = f.readline()\n\n sorted_chars = sorted(chars.items(), key=lambda x:x[1], reverse=True)\n\n # 下标从1开始 0用来补长\n lexicon = dict([(item[0],index+1) for index, item in enumerate(sorted_chars)])\n 
\n del sorted_chars\n \n # 替换无用词的标记,标记为-1\n for v in lexicon:\n if v not in word_dict:\n lexicon[v] = -1\n\n lexicon_reverse = dict(zip(lexicon.values(), lexicon.keys()))\n \n return lexicon, lexicon_reverse\n \ndef create_label_index(): \n\n return {'P':0, 'B':1, 'M':2, 'E':3, 'S':4, 'U':5}\n\ndef create_index_label(): \n\n return {0:'Pad',1:'B',2:'M',3:'E',4:'S',5:'Unk'}\n \ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n \n word_dict = create_useful_words(embedding_model)\n \n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n \n del word_dict\n \n # 增加 padding 和 unknown\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n \n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n\n # 无效词嵌入向量\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n \n return useful_word_length, embedding_weights\n \ndef create_matrix(lexicon, label_2_index):\n\n data_index = codecs.open(\"data/data_index.data\", 'w', 'utf-8')\n label_index = codecs.open(\"data/label_index.data\", 'w', 'utf-8')\n\n file_chars = codecs.open(\"data/data.data\", 'r', 'utf-8')\n file_labels = codecs.open(\"data/label.data\", 'r', 'utf-8')\n \n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n \n while (chars_line and labels_line):\n \n book_chars = chars_line.strip().split()\n book_labels = labels_line.strip().split()\n \n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + \"\\t\")\n label_index.write(str(label_2_index[label]) + \"\\t\")\n \n data_index.write(\"\\n\")\n label_index.write(\"\\n\")\n \n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n \n file_chars.close()\n file_labels.close()\n \n data_index.close()\n label_index.close()\n \ndef padding_sentences(max_len):\n \n data_index = codecs.open(\"data/data_index.data\", 'r', 'utf-8')\n label_index = codecs.open(\"data/label_index.data\", 'r', 'utf-8')\n \n data_index_padding = codecs.open(\"data/data_index_padding.data\", 'w', 'utf-8')\n label_index_padding = codecs.open(\"data/label_index_padding.data\", 'w', 'utf-8')\n \n data_line = data_index.readline()\n \n while data_line:\n \n book_data = data_line.strip().split()\n \n book_data_len = len(book_data)\n \n new_book_data = []\n \n if book_data_len < max_len:\n new_book_data = ([str(0)] * (max_len - book_data_len) + book_data)\n else:\n new_book_data = book_data\n \n for data_word in new_book_data:\n \n data_index_padding.write(data_word + \"\\t\")\n \n data_index_padding.write(\"\\n\")\n \n data_line = data_index.readline()\n\n label_line = label_index.readline()\n \n while label_line:\n \n book_label = label_line.strip().split()\n \n book_label_len = len(book_label)\n \n new_book_label = []\n\n if book_label_len < max_len:\n new_book_label = ([str(0)] * (max_len - book_label_len) + book_label)\n else:\n new_book_label = book_label\n \n for label_word in new_book_label:\n \n label_index_padding.write(label_word + \"\\t\")\n \n label_index_padding.write(\"\\n\")\n \n label_line = label_index.readline()\n \n data_index.close()\n label_index.close()\n data_index_padding.close()\n label_index_padding.close()\n \ndef maxlen_2d_list():\n \n max_len = 0\n \n data_index = codecs.open(\"data/data_index.data\", 'r', 'utf-8')\n \n data_line = data_index.readline()\n \n while data_line:\n \n book_data = 
data_line.strip().split()\n \n book_data_len = len(book_data)\n \n if book_data_len > max_len:\n \n max_len = book_data_len\n \n data_line = data_index.readline()\n \n data_index.close()\n \n return max_len \n ", "step-ids": [ 4, 7, 8, 10, 11 ] }
[ 4, 7, 8, 10, 11 ]
"""This module provides constants for locale-dependent providers.""" import typing as t from mimesis.enums import Locale from mimesis.exceptions import LocaleError __all__ = ["Locale", "validate_locale"] def validate_locale(locale: t.Union[Locale, str]) -> Locale: if isinstance(locale, str): try: return Locale(locale) except ValueError: raise LocaleError(locale) if not isinstance(locale, Locale): raise LocaleError(locale) return locale
normal
{ "blob_id": "779445aa22145d5076940ea5b214c25ad233dd0e", "index": 3087, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n", "step-3": "<mask token>\n__all__ = ['Locale', 'validate_locale']\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n", "step-4": "<mask token>\nimport typing as t\nfrom mimesis.enums import Locale\nfrom mimesis.exceptions import LocaleError\n__all__ = ['Locale', 'validate_locale']\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n", "step-5": "\"\"\"This module provides constants for locale-dependent providers.\"\"\"\n\nimport typing as t\n\nfrom mimesis.enums import Locale\nfrom mimesis.exceptions import LocaleError\n\n__all__ = [\"Locale\", \"validate_locale\"]\n\n\ndef validate_locale(locale: t.Union[Locale, str]) -> Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n\n return locale\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
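A brief usage sketch for the validate_locale helper in the record above. It assumes the mimesis package is installed, that the module above is importable as mimesis.locales, and that Locale.EN is one of the enum's members (any other member would behave the same way); everything else follows directly from the function in the record.

from mimesis.enums import Locale
from mimesis.exceptions import LocaleError
from mimesis.locales import validate_locale  # assumed module path for the record above

# A plain string is coerced into the Locale enum.
assert validate_locale("en") is Locale.EN

# An enum member is passed through unchanged.
assert validate_locale(Locale.EN) is Locale.EN

# Anything else is rejected with LocaleError.
try:
    validate_locale("no-such-locale")
except LocaleError as err:
    print("rejected:", err)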
""" TODO Chess A.I. """ import os, pygame, board, math, engine, sys, gSmart from pygame.locals import * import engine, board, piece, copy class gSmart: def __init__(self): self.e = engine.engine() self.mtrlW = .75 self.dvlpW = 2 self.aggnW = 2 self.defnW = .5 self.thrndW = 2 self.epW = 10 self.chkW = 50 self.chkmtW = 1000 def getNextMove(self, b, n): gt = gameTree(b, n) #create a gameTree of n ply return gt.miniMax() #use miniMax algo to return the best move def getAllNextMoves(self, b): pcs = b.getPieces(b.turn) nextMoves = [] for p in pcs: for x in range(8): for y in range(8): futureB = copy.deepcopy(b) success = futureB.movePiece(self.e, p.sqr, [x,y]) if success == True: m = [p.sqr, [x,y]] nextMoves.append([futureB, m]) # print(nextMoves) return nextMoves def evaluatePosition(self, b): mtrl = b.getMaterialSums() dvlp = self.e.getDevelopment(b) agg = self.e.getAggression(b) defn = self.e.getDefense(b) thrnd = self.e.getThreatened(b) ep = self.e.getEnPrise(b) chk = self.e.getCheck(b) chkmt = self.e.getCheckmate(b) #print("Unweighted") #print("Material: \t" + str(mtrl)) #print("Development: \t" + str(dvlp)) #print("Aggression: \t" + str(agg)) #print("Defense: \t" + str(defn)) #print("Threatened:\t" + str(thrnd)) #print("En Prise: \t" + str(ep)) #print("Check: \t" + str(chk)) #print("Checkmate: \t" + str(chkmt)) #print("") metrics = [mtrl, dvlp, agg, defn, thrnd, ep, chk, chkmt] weights = [self.mtrlW, self.dvlpW, self.aggnW, self.defnW, self.thrndW, self.epW, self.chkW, self.chkmtW] position = [0,0] for x in range(len(metrics)): for y in range(2): position[y]+=metrics[x][y] # print("Position: " + str(position)) weightedMetrics = [ [weights[x]*metrics[x][0], weights[x]*metrics[x][1]] for x in range(len(weights))] #print("Unweighted") #print("Material: \t" + str(weightedMetrics[0])) #print("Development: \t" + str(weightedMetrics[1])) #print("Aggression: \t" + str(weightedMetrics[2])) #print("Defense: \t" + str(weightedMetrics[3])) #print("Threatened:\t" + str(weightedMetrics[4])) #print("En Prise: \t" + str(weightedMetrics[5])) #print("Check: \t" + str(weightedMetrics[6])) #print("Checkmate: \t" + str(weightedMetrics[7])) #print("") weightedPosition = [0,0] for x in range(len(metrics)): for y in range(2): weightedPosition[y]+=weightedMetrics[x][y] # print("Weighted Position: " + str(weightedPosition)) #print("Weighted Posistion: " + str(weightedPosition)) totalWeight = -1*weightedPosition[0] + weightedPosition[1] print("total weight: " + totalWeight) return totalWeight class gameTree(): def __init__(self, b, n): #builds a game tree of "n" ply from board "b" self.t = gSmart.gameTree.tree(b) #create a tree cur = self.t.getRoot() #grab the root self.addPly(cur, b, 3) #build out "h" ply def addPly(self, curNode, b, ply): if ply == 0: #basecase return else: moves = getAllNextMoves(curNode.board) #get moves for board in current node for move in moves: temp = gameTree.tree.node(b,move,mm) #make a new node for each move curNode.addChild(temp) #add the new node as a child to curNode self.addPly(temp, b, ply-1) #recursively call addPly on the child, with one less ply def getMinOrMax(self, b): if b.getTurn == "w": return "max" else: return "min" def minimax(self): return None class tree: def __init__(self, b = None, m= None): self.root = gSmart.gameTree.tree.node(b, m) def getRoot(self): return self.root def addNode(self, parent, child): parent.addChild(child) def DFS(self, start): print(str(start)) children = start.getChildren() if(len(children) == 0): return else: for child in children: 
self.DFS(child) class node: def __init__(self, b = None, m = None): self.children = [] self.board = b self.move = m self.value = None def addChild(self, newChild): self.children.append(newChild) def getChildren(self): return self.children def getData(self): return self.data def setValue(self, v): if v == None: self.value = self.getBoardValue() else: self.value = v def getValue(self): return self.value def getBoardValue(self): return self.gSmart.evaluatePosition() def isMaxNode(self): return self.board.isTurn() == "w" bd = board.Board() bd.setupDefault() gt = gSmart.gameTree(bd, 3) t.DFS(gt.getRoot())
normal
{ "blob_id": "7998c4e0ed2bb683f029342554730464f8ac2a09", "index": 2366, "step-1": "<mask token>\n\n\nclass gSmart:\n <mask token>\n\n def getNextMove(self, b, n):\n gt = gameTree(b, n)\n return gt.miniMax()\n\n def getAllNextMoves(self, b):\n pcs = b.getPieces(b.turn)\n nextMoves = []\n for p in pcs:\n for x in range(8):\n for y in range(8):\n futureB = copy.deepcopy(b)\n success = futureB.movePiece(self.e, p.sqr, [x, y])\n if success == True:\n m = [p.sqr, [x, y]]\n nextMoves.append([futureB, m])\n return nextMoves\n\n def evaluatePosition(self, b):\n mtrl = b.getMaterialSums()\n dvlp = self.e.getDevelopment(b)\n agg = self.e.getAggression(b)\n defn = self.e.getDefense(b)\n thrnd = self.e.getThreatened(b)\n ep = self.e.getEnPrise(b)\n chk = self.e.getCheck(b)\n chkmt = self.e.getCheckmate(b)\n metrics = [mtrl, dvlp, agg, defn, thrnd, ep, chk, chkmt]\n weights = [self.mtrlW, self.dvlpW, self.aggnW, self.defnW, self.\n thrndW, self.epW, self.chkW, self.chkmtW]\n position = [0, 0]\n for x in range(len(metrics)):\n for y in range(2):\n position[y] += metrics[x][y]\n weightedMetrics = [[weights[x] * metrics[x][0], weights[x] *\n metrics[x][1]] for x in range(len(weights))]\n weightedPosition = [0, 0]\n for x in range(len(metrics)):\n for y in range(2):\n weightedPosition[y] += weightedMetrics[x][y]\n totalWeight = -1 * weightedPosition[0] + weightedPosition[1]\n print('total weight: ' + totalWeight)\n return totalWeight\n\n\n class gameTree:\n\n def __init__(self, b, n):\n self.t = gSmart.gameTree.tree(b)\n cur = self.t.getRoot()\n self.addPly(cur, b, 3)\n\n def addPly(self, curNode, b, ply):\n if ply == 0:\n return\n else:\n moves = getAllNextMoves(curNode.board)\n for move in moves:\n temp = gameTree.tree.node(b, move, mm)\n curNode.addChild(temp)\n self.addPly(temp, b, ply - 1)\n\n def getMinOrMax(self, b):\n if b.getTurn == 'w':\n return 'max'\n else:\n return 'min'\n\n def minimax(self):\n return None\n\n\n class tree:\n\n def __init__(self, b=None, m=None):\n self.root = gSmart.gameTree.tree.node(b, m)\n\n def getRoot(self):\n return self.root\n\n def addNode(self, parent, child):\n parent.addChild(child)\n\n def DFS(self, start):\n print(str(start))\n children = start.getChildren()\n if len(children) == 0:\n return\n else:\n for child in children:\n self.DFS(child)\n\n\n class node:\n\n def __init__(self, b=None, m=None):\n self.children = []\n self.board = b\n self.move = m\n self.value = None\n\n def addChild(self, newChild):\n self.children.append(newChild)\n\n def getChildren(self):\n return self.children\n\n def getData(self):\n return self.data\n\n def setValue(self, v):\n if v == None:\n self.value = self.getBoardValue()\n else:\n self.value = v\n\n def getValue(self):\n return self.value\n\n def getBoardValue(self):\n return self.gSmart.evaluatePosition()\n\n def isMaxNode(self):\n return self.board.isTurn() == 'w'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass gSmart:\n\n def __init__(self):\n self.e = engine.engine()\n self.mtrlW = 0.75\n self.dvlpW = 2\n self.aggnW = 2\n self.defnW = 0.5\n self.thrndW = 2\n self.epW = 10\n self.chkW = 50\n self.chkmtW = 1000\n\n def getNextMove(self, b, n):\n gt = gameTree(b, n)\n return gt.miniMax()\n\n def getAllNextMoves(self, b):\n pcs = b.getPieces(b.turn)\n nextMoves = []\n for p in pcs:\n for x in range(8):\n for y in range(8):\n futureB = copy.deepcopy(b)\n success = futureB.movePiece(self.e, p.sqr, [x, y])\n if success == True:\n m = [p.sqr, [x, y]]\n nextMoves.append([futureB, m])\n return nextMoves\n\n def 
evaluatePosition(self, b):\n mtrl = b.getMaterialSums()\n dvlp = self.e.getDevelopment(b)\n agg = self.e.getAggression(b)\n defn = self.e.getDefense(b)\n thrnd = self.e.getThreatened(b)\n ep = self.e.getEnPrise(b)\n chk = self.e.getCheck(b)\n chkmt = self.e.getCheckmate(b)\n metrics = [mtrl, dvlp, agg, defn, thrnd, ep, chk, chkmt]\n weights = [self.mtrlW, self.dvlpW, self.aggnW, self.defnW, self.\n thrndW, self.epW, self.chkW, self.chkmtW]\n position = [0, 0]\n for x in range(len(metrics)):\n for y in range(2):\n position[y] += metrics[x][y]\n weightedMetrics = [[weights[x] * metrics[x][0], weights[x] *\n metrics[x][1]] for x in range(len(weights))]\n weightedPosition = [0, 0]\n for x in range(len(metrics)):\n for y in range(2):\n weightedPosition[y] += weightedMetrics[x][y]\n totalWeight = -1 * weightedPosition[0] + weightedPosition[1]\n print('total weight: ' + totalWeight)\n return totalWeight\n\n\n class gameTree:\n\n def __init__(self, b, n):\n self.t = gSmart.gameTree.tree(b)\n cur = self.t.getRoot()\n self.addPly(cur, b, 3)\n\n def addPly(self, curNode, b, ply):\n if ply == 0:\n return\n else:\n moves = getAllNextMoves(curNode.board)\n for move in moves:\n temp = gameTree.tree.node(b, move, mm)\n curNode.addChild(temp)\n self.addPly(temp, b, ply - 1)\n\n def getMinOrMax(self, b):\n if b.getTurn == 'w':\n return 'max'\n else:\n return 'min'\n\n def minimax(self):\n return None\n\n\n class tree:\n\n def __init__(self, b=None, m=None):\n self.root = gSmart.gameTree.tree.node(b, m)\n\n def getRoot(self):\n return self.root\n\n def addNode(self, parent, child):\n parent.addChild(child)\n\n def DFS(self, start):\n print(str(start))\n children = start.getChildren()\n if len(children) == 0:\n return\n else:\n for child in children:\n self.DFS(child)\n\n\n class node:\n\n def __init__(self, b=None, m=None):\n self.children = []\n self.board = b\n self.move = m\n self.value = None\n\n def addChild(self, newChild):\n self.children.append(newChild)\n\n def getChildren(self):\n return self.children\n\n def getData(self):\n return self.data\n\n def setValue(self, v):\n if v == None:\n self.value = self.getBoardValue()\n else:\n self.value = v\n\n def getValue(self):\n return self.value\n\n def getBoardValue(self):\n return self.gSmart.evaluatePosition()\n\n def isMaxNode(self):\n return self.board.isTurn() == 'w'\n\n\n<mask token>\nbd.setupDefault()\n<mask token>\nt.DFS(gt.getRoot())\n", "step-3": "<mask token>\n\n\nclass gSmart:\n\n def __init__(self):\n self.e = engine.engine()\n self.mtrlW = 0.75\n self.dvlpW = 2\n self.aggnW = 2\n self.defnW = 0.5\n self.thrndW = 2\n self.epW = 10\n self.chkW = 50\n self.chkmtW = 1000\n\n def getNextMove(self, b, n):\n gt = gameTree(b, n)\n return gt.miniMax()\n\n def getAllNextMoves(self, b):\n pcs = b.getPieces(b.turn)\n nextMoves = []\n for p in pcs:\n for x in range(8):\n for y in range(8):\n futureB = copy.deepcopy(b)\n success = futureB.movePiece(self.e, p.sqr, [x, y])\n if success == True:\n m = [p.sqr, [x, y]]\n nextMoves.append([futureB, m])\n return nextMoves\n\n def evaluatePosition(self, b):\n mtrl = b.getMaterialSums()\n dvlp = self.e.getDevelopment(b)\n agg = self.e.getAggression(b)\n defn = self.e.getDefense(b)\n thrnd = self.e.getThreatened(b)\n ep = self.e.getEnPrise(b)\n chk = self.e.getCheck(b)\n chkmt = self.e.getCheckmate(b)\n metrics = [mtrl, dvlp, agg, defn, thrnd, ep, chk, chkmt]\n weights = [self.mtrlW, self.dvlpW, self.aggnW, self.defnW, self.\n thrndW, self.epW, self.chkW, self.chkmtW]\n position = [0, 0]\n for x in 
range(len(metrics)):\n for y in range(2):\n position[y] += metrics[x][y]\n weightedMetrics = [[weights[x] * metrics[x][0], weights[x] *\n metrics[x][1]] for x in range(len(weights))]\n weightedPosition = [0, 0]\n for x in range(len(metrics)):\n for y in range(2):\n weightedPosition[y] += weightedMetrics[x][y]\n totalWeight = -1 * weightedPosition[0] + weightedPosition[1]\n print('total weight: ' + totalWeight)\n return totalWeight\n\n\n class gameTree:\n\n def __init__(self, b, n):\n self.t = gSmart.gameTree.tree(b)\n cur = self.t.getRoot()\n self.addPly(cur, b, 3)\n\n def addPly(self, curNode, b, ply):\n if ply == 0:\n return\n else:\n moves = getAllNextMoves(curNode.board)\n for move in moves:\n temp = gameTree.tree.node(b, move, mm)\n curNode.addChild(temp)\n self.addPly(temp, b, ply - 1)\n\n def getMinOrMax(self, b):\n if b.getTurn == 'w':\n return 'max'\n else:\n return 'min'\n\n def minimax(self):\n return None\n\n\n class tree:\n\n def __init__(self, b=None, m=None):\n self.root = gSmart.gameTree.tree.node(b, m)\n\n def getRoot(self):\n return self.root\n\n def addNode(self, parent, child):\n parent.addChild(child)\n\n def DFS(self, start):\n print(str(start))\n children = start.getChildren()\n if len(children) == 0:\n return\n else:\n for child in children:\n self.DFS(child)\n\n\n class node:\n\n def __init__(self, b=None, m=None):\n self.children = []\n self.board = b\n self.move = m\n self.value = None\n\n def addChild(self, newChild):\n self.children.append(newChild)\n\n def getChildren(self):\n return self.children\n\n def getData(self):\n return self.data\n\n def setValue(self, v):\n if v == None:\n self.value = self.getBoardValue()\n else:\n self.value = v\n\n def getValue(self):\n return self.value\n\n def getBoardValue(self):\n return self.gSmart.evaluatePosition()\n\n def isMaxNode(self):\n return self.board.isTurn() == 'w'\n\n\nbd = board.Board()\nbd.setupDefault()\ngt = gSmart.gameTree(bd, 3)\nt.DFS(gt.getRoot())\n", "step-4": "<mask token>\nimport os, pygame, board, math, engine, sys, gSmart\nfrom pygame.locals import *\nimport engine, board, piece, copy\n\n\nclass gSmart:\n\n def __init__(self):\n self.e = engine.engine()\n self.mtrlW = 0.75\n self.dvlpW = 2\n self.aggnW = 2\n self.defnW = 0.5\n self.thrndW = 2\n self.epW = 10\n self.chkW = 50\n self.chkmtW = 1000\n\n def getNextMove(self, b, n):\n gt = gameTree(b, n)\n return gt.miniMax()\n\n def getAllNextMoves(self, b):\n pcs = b.getPieces(b.turn)\n nextMoves = []\n for p in pcs:\n for x in range(8):\n for y in range(8):\n futureB = copy.deepcopy(b)\n success = futureB.movePiece(self.e, p.sqr, [x, y])\n if success == True:\n m = [p.sqr, [x, y]]\n nextMoves.append([futureB, m])\n return nextMoves\n\n def evaluatePosition(self, b):\n mtrl = b.getMaterialSums()\n dvlp = self.e.getDevelopment(b)\n agg = self.e.getAggression(b)\n defn = self.e.getDefense(b)\n thrnd = self.e.getThreatened(b)\n ep = self.e.getEnPrise(b)\n chk = self.e.getCheck(b)\n chkmt = self.e.getCheckmate(b)\n metrics = [mtrl, dvlp, agg, defn, thrnd, ep, chk, chkmt]\n weights = [self.mtrlW, self.dvlpW, self.aggnW, self.defnW, self.\n thrndW, self.epW, self.chkW, self.chkmtW]\n position = [0, 0]\n for x in range(len(metrics)):\n for y in range(2):\n position[y] += metrics[x][y]\n weightedMetrics = [[weights[x] * metrics[x][0], weights[x] *\n metrics[x][1]] for x in range(len(weights))]\n weightedPosition = [0, 0]\n for x in range(len(metrics)):\n for y in range(2):\n weightedPosition[y] += weightedMetrics[x][y]\n totalWeight = -1 * weightedPosition[0] + 
weightedPosition[1]\n print('total weight: ' + totalWeight)\n return totalWeight\n\n\n class gameTree:\n\n def __init__(self, b, n):\n self.t = gSmart.gameTree.tree(b)\n cur = self.t.getRoot()\n self.addPly(cur, b, 3)\n\n def addPly(self, curNode, b, ply):\n if ply == 0:\n return\n else:\n moves = getAllNextMoves(curNode.board)\n for move in moves:\n temp = gameTree.tree.node(b, move, mm)\n curNode.addChild(temp)\n self.addPly(temp, b, ply - 1)\n\n def getMinOrMax(self, b):\n if b.getTurn == 'w':\n return 'max'\n else:\n return 'min'\n\n def minimax(self):\n return None\n\n\n class tree:\n\n def __init__(self, b=None, m=None):\n self.root = gSmart.gameTree.tree.node(b, m)\n\n def getRoot(self):\n return self.root\n\n def addNode(self, parent, child):\n parent.addChild(child)\n\n def DFS(self, start):\n print(str(start))\n children = start.getChildren()\n if len(children) == 0:\n return\n else:\n for child in children:\n self.DFS(child)\n\n\n class node:\n\n def __init__(self, b=None, m=None):\n self.children = []\n self.board = b\n self.move = m\n self.value = None\n\n def addChild(self, newChild):\n self.children.append(newChild)\n\n def getChildren(self):\n return self.children\n\n def getData(self):\n return self.data\n\n def setValue(self, v):\n if v == None:\n self.value = self.getBoardValue()\n else:\n self.value = v\n\n def getValue(self):\n return self.value\n\n def getBoardValue(self):\n return self.gSmart.evaluatePosition()\n\n def isMaxNode(self):\n return self.board.isTurn() == 'w'\n\n\nbd = board.Board()\nbd.setupDefault()\ngt = gSmart.gameTree(bd, 3)\nt.DFS(gt.getRoot())\n", "step-5": "\"\"\" \r\nTODO\r\n\r\nChess A.I.\r\n\r\n\"\"\"\r\nimport os, pygame, board, math, engine, sys, gSmart\r\nfrom pygame.locals import *\r\n\r\nimport engine, board, piece, copy\r\n\r\nclass gSmart:\r\n\r\n\tdef __init__(self):\r\n\t\tself.e = engine.engine()\r\n\t\tself.mtrlW = .75\r\n\t\tself.dvlpW = 2\r\n\t\tself.aggnW = 2\r\n\t\tself.defnW = .5\r\n\t\tself.thrndW = 2\r\n\t\tself.epW = 10\r\n\t\tself.chkW = 50\r\n\t\tself.chkmtW = 1000\r\n\r\n\tdef getNextMove(self, b, n):\r\n\t\tgt = gameTree(b, n)\t\t#create a gameTree of n ply\r\n\t\treturn gt.miniMax()\t\t#use miniMax algo to return the best move\r\n\r\n\tdef getAllNextMoves(self, b):\r\n\t\tpcs = b.getPieces(b.turn)\r\n\t\tnextMoves = []\r\n\t\tfor p in pcs:\r\n\t\t\tfor x in range(8):\r\n\t\t\t\tfor y in range(8):\r\n\t\t\t\t\tfutureB = copy.deepcopy(b)\r\n\t\t\t\t\tsuccess = futureB.movePiece(self.e, p.sqr, [x,y])\r\n\t\t\t\t\tif success == True:\r\n\t\t\t\t\t\tm = [p.sqr, [x,y]]\r\n\t\t\t\t\t\tnextMoves.append([futureB, m])\r\n\t\t# print(nextMoves)\r\n\t\treturn nextMoves\r\n\r\n\tdef evaluatePosition(self, b):\r\n\r\n\t\tmtrl = b.getMaterialSums()\r\n\t\tdvlp = self.e.getDevelopment(b)\r\n\t\tagg = self.e.getAggression(b)\r\n\t\tdefn = self.e.getDefense(b)\r\n\t\tthrnd = self.e.getThreatened(b)\r\n\t\tep = self.e.getEnPrise(b)\r\n\t\tchk = self.e.getCheck(b)\r\n\t\tchkmt = self.e.getCheckmate(b)\r\n\t\t\r\n\t\t#print(\"Unweighted\")\r\n\t\t#print(\"Material: \\t\" + str(mtrl))\r\n\t\t#print(\"Development: \\t\" + str(dvlp))\r\n\t\t#print(\"Aggression: \\t\" + str(agg))\r\n\t\t#print(\"Defense: \\t\" + str(defn))\r\n\t\t#print(\"Threatened:\\t\" + str(thrnd))\r\n\t\t#print(\"En Prise: \\t\" + str(ep))\r\n\t\t#print(\"Check: \\t\" + str(chk))\r\n\t\t#print(\"Checkmate: \\t\" + str(chkmt))\r\n\t\t#print(\"\")\r\n\r\n\t\tmetrics = [mtrl, dvlp, agg, defn, thrnd, ep, chk, chkmt]\r\n\t\tweights = [self.mtrlW, self.dvlpW, self.aggnW, 
self.defnW, self.thrndW, self.epW, self.chkW, self.chkmtW]\r\n\t\t\r\n\t\tposition = [0,0]\r\n\t\tfor x in range(len(metrics)):\r\n\t\t\tfor y in range(2):\r\n\t\t\t\tposition[y]+=metrics[x][y]\r\n\t\t# print(\"Position: \" + str(position))\r\n\r\n\t\tweightedMetrics = [ [weights[x]*metrics[x][0], weights[x]*metrics[x][1]] for x in range(len(weights))]\r\n\t\t\r\n\t\t#print(\"Unweighted\")\r\n\t\t#print(\"Material: \\t\" + str(weightedMetrics[0]))\r\n\t\t#print(\"Development: \\t\" + str(weightedMetrics[1]))\r\n\t\t#print(\"Aggression: \\t\" + str(weightedMetrics[2]))\r\n\t\t#print(\"Defense: \\t\" + str(weightedMetrics[3]))\r\n\t\t#print(\"Threatened:\\t\" + str(weightedMetrics[4]))\r\n\t\t#print(\"En Prise: \\t\" + str(weightedMetrics[5]))\r\n\t\t#print(\"Check: \\t\" + str(weightedMetrics[6]))\r\n\t\t#print(\"Checkmate: \\t\" + str(weightedMetrics[7]))\r\n\t\t#print(\"\")\r\n\t\t\r\n\t\tweightedPosition = [0,0]\r\n\t\tfor x in range(len(metrics)):\r\n\t\t\tfor y in range(2):\r\n\t\t\t\tweightedPosition[y]+=weightedMetrics[x][y]\r\n\t\t# print(\"Weighted Position: \" + str(weightedPosition))\r\n\r\n\t\t#print(\"Weighted Posistion: \" + str(weightedPosition))\r\n\t\t\r\n\t\ttotalWeight = -1*weightedPosition[0] + weightedPosition[1]\r\n\t\tprint(\"total weight: \" + totalWeight)\r\n\t\t\r\n\t\treturn totalWeight\r\n\r\n\tclass gameTree():\r\n\r\n\t\tdef __init__(self, b, n):\t\t\t\t#builds a game tree of \"n\" ply from board \"b\"\r\n\t\t\tself.t = gSmart.gameTree.tree(b)\t#create a tree\r\n\t\t\tcur = self.t.getRoot()\t\t\t\t#grab the root\r\n\t\t\tself.addPly(cur, b, 3)\t\t\t\t#build out \"h\" ply\r\n\r\n\t\tdef addPly(self, curNode, b, ply):\r\n\t\t\tif ply == 0:\t\t\t#basecase\r\n\t\t\t\treturn\r\n\t\t\telse:\r\n\t\t\t\tmoves = getAllNextMoves(curNode.board)\t#get moves for board in current node\r\n\t\t\t\tfor move in moves:\t\t\t\t\t\t\r\n\t\t\t\t\ttemp = gameTree.tree.node(b,move,mm)\t\t#make a new node for each move\r\n\t\t\t\t\tcurNode.addChild(temp)\t\t\t\t\t\t#add the new node as a child to curNode\r\n\t\t\t\t\tself.addPly(temp, b, ply-1)\t\t\t\t\t#recursively call addPly on the child, with one less ply\r\n\t\t\r\n\t\tdef getMinOrMax(self, b):\r\n\t\t\tif b.getTurn == \"w\":\r\n\t\t\t\treturn \"max\"\r\n\t\t\telse:\r\n\t\t\t\treturn \"min\"\r\n\t\t\t\t\r\n\t\tdef minimax(self):\r\n\t\t\treturn None\r\n\r\n\t\tclass tree:\r\n\t\t\r\n\t\t\tdef __init__(self, b = None, m= None):\r\n\t\t\t\tself.root = gSmart.gameTree.tree.node(b, m)\r\n\t\t\r\n\t\t\tdef getRoot(self):\r\n\t\t\t\treturn self.root\r\n\t\t\t\t\r\n\t\t\tdef addNode(self, parent, child):\r\n\t\t\t\tparent.addChild(child)\r\n\t\t\r\n\t\t\tdef DFS(self, start):\r\n\t\t\t\tprint(str(start))\r\n\t\t\t\tchildren = start.getChildren()\r\n\t\t\t\tif(len(children) == 0):\r\n\t\t\t\t\treturn\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor child in children:\r\n\t\t\t\t\t\tself.DFS(child)\r\n\t\t\t\t\r\n\t\t\tclass node:\r\n\t\t\t\r\n\t\t\t\tdef __init__(self, b = None, m = None):\r\n\t\t\t\t\tself.children = []\r\n\t\t\t\t\tself.board = b\r\n\t\t\t\t\tself.move = m\r\n\t\t\t\t\tself.value = None\r\n\t\t\t\t\r\n\t\t\t\tdef addChild(self, newChild):\r\n\t\t\t\t\tself.children.append(newChild)\r\n\t\t\t\t\r\n\t\t\t\tdef getChildren(self):\r\n\t\t\t\t\treturn self.children\r\n\t\t\t\t\t\r\n\t\t\t\tdef getData(self):\r\n\t\t\t\t\treturn self.data\r\n\t\t\t\t\t\r\n\t\t\t\tdef setValue(self, v):\r\n\t\t\t\t\tif v == None:\r\n\t\t\t\t\t\tself.value = self.getBoardValue()\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.value = v\r\n\t\t\t\t\t\r\n\t\t\t\tdef 
getValue(self):\r\n\t\t\t\t\t\treturn self.value\r\n\t\t\t\t\t\t\r\n\t\t\t\tdef getBoardValue(self):\r\n\t\t\t\t\treturn self.gSmart.evaluatePosition()\r\n\t\t\t\t\r\n\t\t\t\tdef isMaxNode(self):\r\n\t\t\t\t\treturn self.board.isTurn() == \"w\"\r\n\r\nbd = board.Board()\r\nbd.setupDefault()\r\ngt = gSmart.gameTree(bd, 3)\r\nt.DFS(gt.getRoot())", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
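The gameTree.minimax method in the chess record above is still a stub. The sketch below is one hedged way it could be filled in, using only the tree/node classes from that record: it walks the tree depth-first, scores leaves through setValue() (which falls back to the board evaluation), and propagates max values on white-to-move nodes and min values on black-to-move nodes. The helper name minimax_value is mine, not part of the original file.

def minimax_value(node):
    # Leaf: score the position directly via the node's own evaluation hook.
    children = node.getChildren()
    if len(children) == 0:
        node.setValue(None)  # None makes setValue() call getBoardValue()
        return node.getValue()

    # Interior node: white (max) takes the largest child value, black (min) the smallest.
    child_values = [minimax_value(child) for child in children]
    best = max(child_values) if node.isMaxNode() else min(child_values)
    node.setValue(best)
    return best

With values propagated this way, gameTree.minimax could return the root child whose value matches the root's value, i.e. the move to play.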
#!/usr/bin/env python
# encoding: utf-8
'''
A 1D and a 2D CNN branch each extract features, an LSTM follows each branch, and the
extracted features are finally concatenated; the CNN and LSTM stages are interleaved.
'''

# Import the required packages
import keras

# Import the layer types used below
from keras.models import Sequential
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout, LSTM, Reshape
from keras import Model

# For visualising the network
from keras.utils import plot_model


def merge_model(model_1, model_2):
    '''
    Merge two independent Keras models into one.
    :param model_1:
    :param model_2:
    :return:
    '''

    # model_1.load_weights('model_1_weight.h5')  # individual weights could be loaded here
    # model_2.load_weights('model_2_weight.h5')  # e.g. pre-trained weights (transfer learning)

    inp1 = model_1.input   # inputs of the first model
    inp2 = model_2.input   # inputs of the second model
    r1 = model_1.output
    r2 = model_2.output
    x = keras.layers.Concatenate(axis=1)([r1, r2])
    model = Model(inputs=[inp1, inp2], outputs=x)
    return model


def addLayers_model(model):
    '''
    Modify the model (append layers).
    After fusing features with Keras' Concatenate, the Sequential-style add no longer
    applies, so layers are appended with the functional API instead.
    :param model: the model to extend
    :return:
    '''
    origin_model = model
    for layer in origin_model.layers:
        layer.trainable = False  # freeze the existing layers so they are not retrained

    inp = origin_model.input
    x = origin_model.output
    den = Dense(512, name="fine_dense")(x)
    l = Dropout(0.5)(den)
    result = Dense(10, activation="softmax")(l)
    model = Model(inputs=inp, outputs=result)
    return model


input_shape_1D = (1024, 1)
input_shape_2D = (32, 32, 1)

# Build the model
# Structure: conv (relu) - pooling - conv - pooling - Flatten - merge - dense - Dropout - softmax
# ==================== 1. 1D branch ==============================
model1 = Sequential()
# Conv1D: 8 @ 1*1024. 8 filters (kernels) of size 3
model1.add(Conv1D(filters=8,
                  kernel_size=3,
                  input_shape=input_shape_1D,
                  padding='same',
                  activation='relu'))

# MaxPooling1D: 8 @ 1*512
model1.add(MaxPooling1D(pool_size=2, padding='same'))

# Conv1D: 16 @ 1*512. 16 filters of size 3
model1.add(Conv1D(filters=16,
                  kernel_size=3,
                  input_shape=(1, 512),
                  padding='same',
                  activation='relu'))

# MaxPooling1D: 16 @ 1*256
model1.add(MaxPooling1D(pool_size=2, padding='same'))
'''
# Conv1D: 16 @ 1*256. 16 filters of size 3
model1.add(Conv1D(filters=16,
                  kernel_size=3,
                  input_shape=(1, 512),
                  padding='same',
                  activation='relu'))

# MaxPooling1D: 16 @ 1*128
model1.add(MaxPooling1D(pool_size=2, padding='same'))
'''

model1.add(LSTM(32, return_sequences=True))
model1.add(Flatten())  # flatten the output to one dimension
# =============================================================

# ==================== 2. 2D branch ===========================
model2 = Sequential()
# Conv2D: 8 @ 32*32. 8 filters (kernels) of size 3*3
model2.add(Conv2D(filters=8,
                  kernel_size=(3, 3),
                  input_shape=input_shape_2D,
                  padding='same',
                  activation='relu'))

# MaxPooling2D: 8 @ 16*16
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

# Conv2D: 16 @ 16*16. 16 filters of size 3*3
model2.add(Conv2D(filters=16,
                  kernel_size=(3, 3),
                  input_shape=(16, 16, 1),
                  padding='same',
                  activation='relu'))

# MaxPooling2D: 16 @ 8*8
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

'''
# Conv2D: 16 @ 8*8. 16 filters of size 3*3
model2.add(Conv2D(filters=16,
                  kernel_size=(3, 3),
                  input_shape=(8, 8, 1),
                  padding='same',
                  activation='relu'))

# MaxPooling2D: 16 @ 4*4
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
'''
print("model2 output shape after the two conv blocks:", model2.output_shape)  # (None, 8, 8, 16)
model2.add(Reshape((64, 16)))  # (None, 64, 16)
model2.add(LSTM(32, return_sequences=True))
model2.add(Flatten())
# =============================================================


# ==================== 3. Merge layer ==========================
# Fuse the two branches
model = merge_model(model1, model2)
model.summary()
# =============================================================

print("model.outputs:", model.output.shape)

# ============= 4. Dense, dropout and classification layers ====
model = addLayers_model(model)
print(model.summary())

plot_model(model, to_file='model/1D2DLSTM_cross.png')
# =============================================================

# ==================== 5. Training configuration ===============
# Adam optimizer. An initial learning rate of 0.1 with decay via ReduceLROnPlateau
# would be configured in the model.fit callbacks.
# adam = keras.optimizers.Adam(lr=0.1)
adam = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
# =============================================================

# Save the model structure
model.save('model/1D2DLSTM_cross.h5')
normal
{ "blob_id": "cce1b6f8e4b3f78adfa2243fe49b4994d35c5a38", "index": 9898, "step-1": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\n<mask token>\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\n<mask token>\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\n<mask token>\nmodel.summary()\nprint('model.outputs:', model.output.shape)\n<mask token>\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\n<mask token>\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n", "step-3": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in 
origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = 1024, 1\ninput_shape_2D = 32, 32, 1\nmodel1 = Sequential()\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\nmodel2 = Sequential()\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\nmodel = merge_model(model1, model2)\nmodel.summary()\nprint('model.outputs:', model.output.shape)\nmodel = addLayers_model(model)\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n", "step-4": "<mask token>\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout, LSTM, Reshape\nfrom keras import Model\nfrom keras.utils import plot_model\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = 1024, 1\ninput_shape_2D = 32, 32, 1\nmodel1 = Sequential()\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\nmodel2 = Sequential()\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), 
padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\nmodel = merge_model(model1, model2)\nmodel.summary()\nprint('model.outputs:', model.output.shape)\nmodel = addLayers_model(model)\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n", "step-5": "#!/usr/bin/env python\n# encoding: utf-8\n'''\n 1D2DCNN抽取特征,LSTM后提取特征,最后将提取的特征进行拼接,CNN与LSTM是交叉在一起的\n'''\n\n# 导入相关的包\nimport keras\n\n# 导入相关层的结构\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout,LSTM,Reshape\nfrom keras import Model\n\n# 可视化神经网络\nfrom keras.utils import plot_model\n\n\ndef merge_model(model_1, model_2):\n '''\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n '''\n\n # model_1.load_weights('model_1_weight.h5')#这里可以加载各自权重\n # model_2.load_weights('model_2_weight.h5')#可以是预训练好的模型权重(迁移学习)\n\n inp1 = model_1.input # 第一个模型的参数\n inp2 = model_2.input # 第二个模型的参数\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n '''\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n '''\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False # 原来的不训练,冻结网络层\n\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name=\"fine_dense\")(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation=\"softmax\")(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = (1024, 1)\ninput_shape_2D = (32, 32, 1)\n\n# 构建模型\n# 网络结构(卷积层:relu - 池化层 - 卷积层 - 池化层 - Flatten - 汇聚层 - 全连接层 - Dropout - softmax)\n# ====================1、 1D部分 ==============================\nmodel1 = Sequential()\n# Conv1D:8 @ 1*1024。8个过滤器(卷积核),卷积核大小设置为3\nmodel1.add(Conv1D(filters=8,\n kernel_size=(3),\n input_shape=input_shape_1D,\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:8 @ 1*512。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n\n# Conv1D:16 @ 1*512。16个过滤器,大小设置为3\nmodel1.add(Conv1D(filters=16,\n kernel_size=(3),\n input_shape=(1, 512),\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:16 @ 1*256。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n'''\n# Conv1D: 16 @ 1*256 。16个过滤器,大小设置为3\nmodel1.add(Conv1D(filters=16,\n kernel_size=(3),\n input_shape=(1, 512),\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:16 @ 1*128。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n'''\n\nmodel1.add(LSTM(32,return_sequences=True))\nmodel1.add(Flatten()) # 压平:将输出压平为1维\n\n# =============================================================\n\n# ============ ======== 2、 2D部分 ============================\nmodel2 = Sequential()\n# Conv2D:8 @ 32*32。8个过滤器(卷积核),卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=8,\n kernel_size=(3, 3),\n input_shape=input_shape_2D,\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:8 @ 16*16。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n\n# Conv2D:16 @ 16*16。16个过滤器,卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=16,\n kernel_size=(3, 3),\n input_shape=(16, 16, 1),\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:16 @ 
8*8。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n\n'''\n# Conv2D:16 @ 8*8。16个过滤器,卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=16,\n kernel_size=(3, 3),\n input_shape=(8, 8, 1),\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:16 @ 4*4。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n'''\nprint(\"model2两层卷积后的输出形状:\",model2.output_shape) # (None,4,4,16)\nmodel2.add(Reshape((64,16))) #(None,16,16)\nmodel2.add(LSTM(32,return_sequences=True))\nmodel2.add(Flatten())\n# =============================================================\n\n\n# ==================== 3、汇聚层 ===============================\n# 融合部分\nmodel = merge_model(model1, model2)\nmodel.summary()\n# =============================================================\n\nprint(\"model.outputs:\",model.output.shape)\n\n# ============= 4、 全连接层,dropout,分类层 ====================\nmodel = addLayers_model(model)\nprint(model.summary())\n\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\n# =============================================================\n\n# ==================== 5、模型训练指标 ==========================\n# adam优化器, lr:初始学习率为0.1,学习率下降递减采用:ReduceLROnPlateau,在 model.fit 的回调函数中设置\n# adam = keras.optimizers.Adam(lr=0.1)\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy',\n optimizer=adam,\n metrics=['accuracy'])\n# =============================================================\n\n# 保存模型结构\nmodel.save('model/1D2DLSTM_cross.h5')\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from rest_framework import serializers
from shopping_cars.models import Order, ShoppingCart


class OrderSerializer(serializers.ModelSerializer):
    class Meta:
        model = Order
        fields = '__all__'


class OrderProductSerializer(serializers.ModelSerializer):
    class Meta:
        model = ShoppingCart
        fields = '__all__'

    # Two ways to validate:
    # 1. field-level validators (validate_<field_name>)
    def validate_quantity(self, value):
        if value <= 0:
            raise serializers.ValidationError(
                "Please, enter a positive quantity")
        return value

    def validate_total_price_product(self, value):
        if value <= 0:
            raise serializers.ValidationError(
                "Please, enter a positive total price")
        return value

    # 2. object-level validation (validate), run after the field-level checks pass
    def validate(self, data):
        if data['quantity'] <= 0 or data['total_price_product'] <= 0:
            raise serializers.ValidationError(
                "Please, enter a positive value")
        return data
normal
{ "blob_id": "9c14f024b25c5014567405535dbe5a6c787cfe28", "index": 6529, "step-1": "<mask token>\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive total price')\n return value\n <mask token>\n", "step-3": "<mask token>\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive total price')\n return value\n\n def validate(self, data):\n if data['quantity'] <= 0 and data['total_price_product'] <= 0:\n raise serializers.ValidationError('Please, enter a positive value')\n return data\n", "step-4": "from abc import ABC\nfrom rest_framework import serializers\nfrom shopping_cars.models import Order, ShoppingCart\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive quantity')\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n 'Please, enter a positive total price')\n return value\n\n def validate(self, data):\n if data['quantity'] <= 0 and data['total_price_product'] <= 0:\n raise serializers.ValidationError('Please, enter a positive value')\n return data\n", "step-5": "from abc import ABC\nfrom rest_framework import serializers\nfrom shopping_cars.models import Order, ShoppingCart\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass OrderProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = ShoppingCart\n fields = '__all__'\n\n # ways to validate\n # #1\n def validate_quantity(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n \"Please, enter a positive quantity\")\n return value\n\n def validate_total_price_product(self, value):\n if value <= 0:\n raise serializers.ValidationError(\n \"Please, enter a positive total price\")\n return value\n\n # #2\n def validate(self, data):\n if data['quantity'] <= 0 and data['total_price_product'] <= 0:\n raise serializers.ValidationError(\n \"Please, enter a positive value\")\n return data\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
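A short, hedged example of how the two validation styles in the record above behave at runtime. It assumes the serializer module's import path, and that the ShoppingCart model's fields include quantity and total_price_product (the names the validators reference); any other required model fields would simply contribute their own errors.

from shopping_cars.serializers import OrderProductSerializer  # assumed module path

serializer = OrderProductSerializer(data={
    "quantity": -3,
    "total_price_product": 150,
})

# Field-level validators (#1) run first: quantity fails with
# "Please, enter a positive quantity". The object-level validate() (#2)
# only runs once every individual field has passed.
print(serializer.is_valid())  # False
print(serializer.errors)      # {'quantity': [...], ...}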
def test_number(): pass
normal
{ "blob_id": "687ab41e9ce94c8d14154a941504845a8fa9f2d9", "index": 8660, "step-1": "<mask token>\n", "step-2": "def test_number():\n pass\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
import json
import logging

import boto3
import pytz
from datetime import datetime, timedelta
from boto3.dynamodb.conditions import Attr
from botocore.exceptions import ClientError


def lambda_handler(event, context):
    current_date = datetime.now(pytz.timezone('US/Central'))
    yesterday_date = current_date - timedelta(days=1)
    yesterday_date_string = yesterday_date.strftime("%Y-%m-%dT")

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('AppStreamDynamoDB1')

    # grab every item whose formData field contains yesterday's date prefix
    response = table.scan(
        FilterExpression=Attr('formData').contains(yesterday_date_string)
    )
    items = response['Items']
    print(yesterday_date_string)
    print(items)

    if len(items) != 0:
        # back the items up to S3 before deleting them from the table
        if saving_backup(items):
            delete_entries(table, yesterday_date_string)

    return items


def saving_backup(items):
    s3_client = boto3.client('s3')
    key = datetime.now(pytz.timezone('US/Central')).strftime("%Y-%m-%dT")
    bucket = 'REPLACE_WITH_BUCKET_NAME'
    serializedData = json.dumps(items, default=str)
    try:
        s3_client.put_object(Bucket=bucket, Key=key, Body=serializedData)
    except ClientError as e:
        logging.error(e)
        return False
    return True


def delete_entries(table, yesterday_date_string):
    # ---------------- Delete items inside the DynamoDB table ----------------
    print("Attempting a conditional delete...")
    try:
        table.delete_item(
            Key={
                'date': yesterday_date_string,
            },
        )
    except ClientError as e:
        if e.response['Error']['Code'] == "ConditionalCheckFailedException":
            print(e.response['Error']['Message'])
        else:
            raise
    else:
        print("DeleteItem succeeded:")
normal
{ "blob_id": "64d955d568a6bfec50aad36c9c4f1e36998e4d74", "index": 7467, "step-1": "<mask token>\n\n\ndef lambda_handler(event, context):\n current_date = datetime.now(pytz.timezone('US/Central'))\n yesterday_date = current_date - timedleta(days=1)\n yesterday_date_string = yesterday_date.strftime('%Y-%m-%dT')\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('AppStreamDynamoDB1')\n response = table.scan(FilterExpression=Attr('formData').contains(\n yesterday_date_string))\n items = response['Items']\n print(items)\n print('testing')\n print(yesterday_date_string)\n if len(items) != 0:\n print(items)\n return items\n saving_backup()\n delete_entires()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef lambda_handler(event, context):\n current_date = datetime.now(pytz.timezone('US/Central'))\n yesterday_date = current_date - timedleta(days=1)\n yesterday_date_string = yesterday_date.strftime('%Y-%m-%dT')\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('AppStreamDynamoDB1')\n response = table.scan(FilterExpression=Attr('formData').contains(\n yesterday_date_string))\n items = response['Items']\n print(items)\n print('testing')\n print(yesterday_date_string)\n if len(items) != 0:\n print(items)\n return items\n saving_backup()\n delete_entires()\n\n\ndef saving_backup():\n s3_client = boto3.client('s3')\n key = datetime.now(pytz.timezone('US/Central')).strftime('%Y-%m-%dT')\n bucket = 'REPLACE_WITH_BUCKET_NAME'\n data = []\n serializedData = json.dumps(data)\n try:\n response = s3.put_object(Bucket=bucket, Key=key, Body=serializedData)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef lambda_handler(event, context):\n current_date = datetime.now(pytz.timezone('US/Central'))\n yesterday_date = current_date - timedleta(days=1)\n yesterday_date_string = yesterday_date.strftime('%Y-%m-%dT')\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('AppStreamDynamoDB1')\n response = table.scan(FilterExpression=Attr('formData').contains(\n yesterday_date_string))\n items = response['Items']\n print(items)\n print('testing')\n print(yesterday_date_string)\n if len(items) != 0:\n print(items)\n return items\n saving_backup()\n delete_entires()\n\n\ndef saving_backup():\n s3_client = boto3.client('s3')\n key = datetime.now(pytz.timezone('US/Central')).strftime('%Y-%m-%dT')\n bucket = 'REPLACE_WITH_BUCKET_NAME'\n data = []\n serializedData = json.dumps(data)\n try:\n response = s3.put_object(Bucket=bucket, Key=key, Body=serializedData)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n\ndef delete_entires():\n saving_backup() == True\n print('Attempting a conditional delete...')\n try:\n response = table.delete_item(Key={'date': yesterday_date_string})\n except ClientError as e:\n if e.response['Error']['Code'] == 'ConditionalCheckFailedException':\n print(e.response['Error']['Message'])\n else:\n raise\n else:\n print('DeleteItem succeeded:')\n", "step-4": "import csv\nimport boto3\nimport pytz\nimport time\nfrom datetime import datetime, timedelta\nfrom boto3.dynamodb.conditions import Key, Attr\n\n\ndef lambda_handler(event, context):\n current_date = datetime.now(pytz.timezone('US/Central'))\n yesterday_date = current_date - timedleta(days=1)\n yesterday_date_string = yesterday_date.strftime('%Y-%m-%dT')\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('AppStreamDynamoDB1')\n response = 
table.scan(FilterExpression=Attr('formData').contains(\n yesterday_date_string))\n items = response['Items']\n print(items)\n print('testing')\n print(yesterday_date_string)\n if len(items) != 0:\n print(items)\n return items\n saving_backup()\n delete_entires()\n\n\ndef saving_backup():\n s3_client = boto3.client('s3')\n key = datetime.now(pytz.timezone('US/Central')).strftime('%Y-%m-%dT')\n bucket = 'REPLACE_WITH_BUCKET_NAME'\n data = []\n serializedData = json.dumps(data)\n try:\n response = s3.put_object(Bucket=bucket, Key=key, Body=serializedData)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n\ndef delete_entires():\n saving_backup() == True\n print('Attempting a conditional delete...')\n try:\n response = table.delete_item(Key={'date': yesterday_date_string})\n except ClientError as e:\n if e.response['Error']['Code'] == 'ConditionalCheckFailedException':\n print(e.response['Error']['Message'])\n else:\n raise\n else:\n print('DeleteItem succeeded:')\n", "step-5": "import csv\nimport boto3 \nimport pytz\nimport time\nfrom datetime import datetime, timedelta\n# current_time = int(datetime.now())\nfrom boto3.dynamodb.conditions import Key, Attr\n\n\ndef lambda_handler(event, context):\n current_date = datetime.now(pytz.timezone('US/Central'))\n yesterday_date = current_date - timedleta(days=1)\n yesterday_date_string = yesterday_date.strftime(\"%Y-%m-%dT\")\n\n dynamodb = boto3.resource('dynamodb')\n\n table = dynamodb.Table('AppStreamDynamoDB1')\n\n\n response = table.scan(\n FilterExpression=Attr('formData').contains(yesterday_date_string)\n )\n items = response['Items']\n print(items) # it should print out the values\n print(\"testing\")\n print(yesterday_date_string)\n\n if len(items) != 0:\n print(items) # it should print null\n return items\n\n \n saving_backup()\n delete_entires()\n\ndef saving_backup():\n s3_client = boto3.client('s3')\n key = datetime.now(pytz.timezone('US/Central')).strftime(\"%Y-%m-%dT\")\n bucket = 'REPLACE_WITH_BUCKET_NAME'\n data = []\n serializedData = json.dumps(data)\n try:\n # response = s3_client.upload_file(file_name, bucket, object_name)\n response = s3.put_object(Bucket=bucket, Key=key, Body=serializedData)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n\ndef delete_entires():\n saving_backup() == True\n #----------------------Delete Items inside the dynamo db---------------------------------------------\n\n print(\"Attempting a conditional delete...\")\n\n try:\n response = table.delete_item(\n Key={\n 'date': yesterday_date_string ,\n \n },\n # ConditionExpression=\"info.rating <= :val\",\n # ExpressionAttributeValues= {\n # \":val\": decimal.Decimal(5)\n # }\n )\n except ClientError as e:\n if e.response['Error']['Code'] == \"ConditionalCheckFailedException\":\n print(e.response['Error']['Message'])\n else:\n raise\n else:\n print(\"DeleteItem succeeded:\")\n # print(json.dumps(response, indent=4, cls=DecimalEncoder))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.contrib.auth import views as auth_views

from rest_framework_swagger.views import get_swagger_view

schema_view = get_swagger_view(title='API')

urlpatterns = [
    path('django-admin/', admin.site.urls),
    path('', schema_view),
    path('auth/login/', auth_views.LoginView.as_view(template_name='auth/login.html')),
    path('auth/logout/', auth_views.LogoutView.as_view()),
    path('api/auth/', include('apps.auth.urls')),
    path('api/polls/', include('apps.polls.urls')),
]

if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls))
    ] + urlpatterns
normal
{ "blob_id": "987d6c769a4f593405e889ed2b0e3f9955900406", "index": 856, "step-1": "<mask token>\n", "step-2": "<mask token>\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n", "step-3": "<mask token>\nschema_view = get_swagger_view(title='API')\n<mask token>\nurlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view\n ), path('auth/login/', auth_views.LoginView.as_view(template_name=\n 'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view\n ()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',\n include('apps.polls.urls'))]\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n", "step-4": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='API')\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view\n ), path('auth/login/', auth_views.LoginView.as_view(template_name=\n 'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view\n ()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',\n include('apps.polls.urls'))]\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n", "step-5": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\n\nfrom rest_framework_swagger.views import get_swagger_view\n\nschema_view = get_swagger_view(title='API')\n\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n path('django-admin/', admin.site.urls),\n path('', schema_view),\n path('auth/login/', auth_views.LoginView.as_view(template_name='auth/login.html')),\n path('auth/logout/', auth_views.LogoutView.as_view()),\n path('api/auth/', include('apps.auth.urls')),\n path('api/polls/', include('apps.polls.urls')),\n]\n\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import cv2,os import sqlite3 cam = cv2.VideoCapture(0) detector = cv2.CascadeClassifier('Classifiers/face.xml') i = 0 offset = 50 def create_or_open_db(db_file): db_is_new = not os.path.exists(db_file) conn = sqlite3.connect(db_file) if db_is_new: print 'Creating schema' sql = '''create table if not exists PEOPLES( ID INTEGER PRIMARY KEY, Name TEXT);''' sql_image = '''create table if not exists PICTURES( ID INTEGER PRIMARY KEY AUTOINCREMENT, Picture BLOB, Type TEXT, File_name TEXT);''' sql_trainer = '''create table if not exists TRAINER( ID INTEGER PRIMARY KEY, File BLOB, Type TEXT, File_name TEXT);''' conn.execute(sql) # shortcut for conn.cursor().execute(sql) conn.execute(sql_image) # create image table conn.execute(sql_trainer) # create trainer table else: print 'Schema exists\n' return conn def insertOrUpdate(Id,Name): conn=sqlite3.connect("FaceBase.db") cmd="SELECT * FROM PEOPLES WHERE ID="+str(Id) cursor=conn.execute(cmd) isRecordExist=0 for row in cursor: isRecordExist=1 if(isRecordExist==1): cmd="UPDATE PEOPLES SET NAME='"+str(Name)+"' WHERE ID="+str(Id) else: cmd="INSERT INTO PEOPLES(ID,NAME)Values("+str(Id)+",'"+str(Name)+"')" conn.execute(cmd) conn.commit() conn.close() def insert_picture(picture_file): conn = create_or_open_db('FaceBase.db') with open(picture_file, 'rb') as input_file: ablob = input_file.read() base=os.path.basename(picture_file) afile, ext = os.path.splitext(base) sql = '''INSERT INTO PICTURES (PICTURE, TYPE, FILE_NAME) VALUES(?, ?, ?);''' conn.execute(sql,[sqlite3.Binary(ablob), ext, afile]) conn.commit() # picture_file = "./dataSet/face- 2.1.jpg" # insert_picture(conn, picture_file) # conn.close() id=raw_input('Digite o id ') name=raw_input('Digite o Nome ') create_or_open_db('FaceBase.db') insertOrUpdate(id,name) while True: ret, im =cam.read() gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) faces=detector.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(100, 100), flags=cv2.CASCADE_SCALE_IMAGE) for(x,y,w,h) in faces: i=i+1 cv2.imwrite("dataSet/face-"+id +'.'+ str(i) + ".jpg", gray[y-offset:y+h+offset,x-offset:x+w+offset]) #picture_file = "./dataSet/face-"+id +'.'+ str(i) + ".jpg" #insert_picture(picture_file) cv2.rectangle(im,(x-50,y-50),(x+w+50,y+h+50),(225,0,0),2) cv2.imshow('im',im[y-offset:y+h+offset,x-offset:x+w+offset]) cv2.waitKey(100) if i>70: cam.release() cv2.destroyAllWindows() break
normal
{ "blob_id": "3beaea1f2b1b085a60bdc5e53f4e6d9aff7e8b6f", "index": 5538, "step-1": "import cv2,os\nimport sqlite3\ncam = cv2.VideoCapture(0)\ndetector = cv2.CascadeClassifier('Classifiers/face.xml')\ni = 0\noffset = 50\n\n\ndef create_or_open_db(db_file):\n db_is_new = not os.path.exists(db_file)\n conn = sqlite3.connect(db_file)\n if db_is_new:\n print 'Creating schema'\n sql = '''create table if not exists PEOPLES(\n ID INTEGER PRIMARY KEY,\n Name TEXT);'''\n sql_image = '''create table if not exists PICTURES(\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Picture BLOB,\n Type TEXT,\n File_name TEXT);'''\n sql_trainer = '''create table if not exists TRAINER(\n ID INTEGER PRIMARY KEY,\n File BLOB,\n Type TEXT,\n File_name TEXT);'''\n conn.execute(sql) # shortcut for conn.cursor().execute(sql)\n conn.execute(sql_image) # create image table\n conn.execute(sql_trainer) # create trainer table\n else:\n print 'Schema exists\\n'\n return conn\n\ndef insertOrUpdate(Id,Name):\n conn=sqlite3.connect(\"FaceBase.db\")\n cmd=\"SELECT * FROM PEOPLES WHERE ID=\"+str(Id)\n cursor=conn.execute(cmd)\n isRecordExist=0\n for row in cursor:\n isRecordExist=1\n if(isRecordExist==1):\n cmd=\"UPDATE PEOPLES SET NAME='\"+str(Name)+\"' WHERE ID=\"+str(Id)\n else:\n cmd=\"INSERT INTO PEOPLES(ID,NAME)Values(\"+str(Id)+\",'\"+str(Name)+\"')\"\n conn.execute(cmd)\n conn.commit()\n conn.close()\n\ndef insert_picture(picture_file):\n conn = create_or_open_db('FaceBase.db')\n with open(picture_file, 'rb') as input_file:\n ablob = input_file.read()\n base=os.path.basename(picture_file)\n afile, ext = os.path.splitext(base)\n sql = '''INSERT INTO PICTURES\n (PICTURE, TYPE, FILE_NAME)\n VALUES(?, ?, ?);'''\n conn.execute(sql,[sqlite3.Binary(ablob), ext, afile]) \n conn.commit()\n\t\n# picture_file = \"./dataSet/face- 2.1.jpg\"\n# insert_picture(conn, picture_file)\n# conn.close()\n\nid=raw_input('Digite o id ')\nname=raw_input('Digite o Nome ')\ncreate_or_open_db('FaceBase.db')\ninsertOrUpdate(id,name)\n\nwhile True:\n ret, im =cam.read()\n gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n faces=detector.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(100, 100), flags=cv2.CASCADE_SCALE_IMAGE)\n for(x,y,w,h) in faces:\n i=i+1\n cv2.imwrite(\"dataSet/face-\"+id +'.'+ str(i) + \".jpg\", gray[y-offset:y+h+offset,x-offset:x+w+offset])\n #picture_file = \"./dataSet/face-\"+id +'.'+ str(i) + \".jpg\"\n #insert_picture(picture_file)\n cv2.rectangle(im,(x-50,y-50),(x+w+50,y+h+50),(225,0,0),2)\n cv2.imshow('im',im[y-offset:y+h+offset,x-offset:x+w+offset])\n cv2.waitKey(100)\n if i>70:\n cam.release()\n cv2.destroyAllWindows()\n break\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import pandas as pd import matplotlib.pyplot as plt import numpy as np r_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv') em_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv') aishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv') agni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv') df = pd.concat([aishah_data_df.mean(),em_data_df.mean(),r_data_df.mean(),agni_data_df.mean()],axis=1).T # Setting the positions and width for the bars pos = list(range(len(df['Mersenne Twister']))) width = 0.2 # Plotting the bars fig, ax = plt.subplots(figsize=(10,5)) # Create a bar with pre_score data, # in position pos, plt.bar(pos, #using df['pre_score'] data, df['Mersenne Twister'], # of width width, # with alpha 0.5 alpha=0.5, # with color color='#EE3224') # with label the first value in first_name #label=df['first_name'][0]) # Create a bar with mid_score data, # in position pos + some width buffer, plt.bar([p + width for p in pos], #using df['mid_score'] data, df['Xorshift 128+'], # of width width, # with alpha 0.5 alpha=0.5, # with color color='#F78F1E') # with label the second value in first_name #label=df['first_name'][1]) # Create a bar with post_score data, # in position pos + some width buffer, plt.bar([p + width*2 for p in pos], #using df['post_score'] data, df['SPCG64'], # of width width, # with alpha 0.5 #alpha=0.5, # with color color='#FFC222') # with label the third value in first_name #label=df['first_name'][2]) # Create a bar with post_score data, # in position pos + some width buffer, plt.bar([p + width*3 for p in pos], #using df['post_score'] data, df['Xoroshiro 128+'], # of width width, # with alpha 0.5 #alpha=0.5, # with color color='#FF3300') # with label the third value in first_name #label=df['first_name'][2]) # Set the y axis label ax.set_ylabel('Average MB/s',fontweight='bold') # Set the chart's title ax.set_title('Average MBs of Random Numbers Generated in a Second',fontweight='bold') # Set the position of the x ticks ax.set_xticks([p + 1.5 * width for p in pos]) # Set the labels for the x ticks ax.set_xticklabels(['MacBook 2017','MacBook 2015','MacBook 2011','Ubuntu 18.04']) # Setting the x-axis and y-axis limits plt.xlim(min(pos)-width, max(pos)+width*4) plt.ylim([0, 10000] ) # Adding the legend and showing the plot plt.legend(['Mersenne Twister','Xorshift 128+', 'SPCG64','Xoroshiro 128+'], loc='upper left') plt.grid() #plt.show() plt.savefig('barchart_compare.png')
normal
{ "blob_id": "467b919f6953737eedd3f99596df244bd1177575", "index": 5411, "step-1": "<mask token>\n", "step-2": "<mask token>\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n", "step-3": "<mask token>\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\ndf = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),\n agni_data_df.mean()], axis=1).T\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\nfig, ax = plt.subplots(figsize=(10, 5))\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n", "step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\ndf = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),\n agni_data_df.mean()], axis=1).T\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\nfig, ax = plt.subplots(figsize=(10, 5))\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 
4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n", "step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\n\ndf = pd.concat([aishah_data_df.mean(),em_data_df.mean(),r_data_df.mean(),agni_data_df.mean()],axis=1).T\n\n\n# Setting the positions and width for the bars\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\n\n# Plotting the bars\nfig, ax = plt.subplots(figsize=(10,5))\n\n# Create a bar with pre_score data,\n# in position pos,\nplt.bar(pos,\n #using df['pre_score'] data,\n df['Mersenne Twister'],\n # of width\n width,\n # with alpha 0.5\n alpha=0.5,\n # with color\n color='#EE3224')\n # with label the first value in first_name\n #label=df['first_name'][0])\n\n# Create a bar with mid_score data,\n# in position pos + some width buffer,\nplt.bar([p + width for p in pos],\n #using df['mid_score'] data,\n df['Xorshift 128+'],\n # of width\n width,\n # with alpha 0.5\n alpha=0.5,\n # with color\n color='#F78F1E')\n # with label the second value in first_name\n #label=df['first_name'][1])\n\n# Create a bar with post_score data,\n# in position pos + some width buffer,\nplt.bar([p + width*2 for p in pos],\n #using df['post_score'] data,\n df['SPCG64'],\n # of width\n width,\n # with alpha 0.5\n #alpha=0.5,\n # with color\n color='#FFC222')\n # with label the third value in first_name\n #label=df['first_name'][2])\n \n# Create a bar with post_score data,\n# in position pos + some width buffer,\nplt.bar([p + width*3 for p in pos],\n #using df['post_score'] data,\n df['Xoroshiro 128+'],\n # of width\n width,\n # with alpha 0.5\n #alpha=0.5,\n # with color\n color='#FF3300')\n # with label the third value in first_name\n #label=df['first_name'][2])\n\n# Set the y axis label\nax.set_ylabel('Average MB/s',fontweight='bold')\n\n# Set the chart's title\nax.set_title('Average MBs of Random Numbers Generated in a Second',fontweight='bold')\n\n# Set the position of the x ticks\nax.set_xticks([p + 1.5 * width for p in pos])\n\n# Set the labels for the x ticks\nax.set_xticklabels(['MacBook 2017','MacBook 2015','MacBook 2011','Ubuntu 18.04'])\n\n# Setting the x-axis and y-axis limits\nplt.xlim(min(pos)-width, max(pos)+width*4)\nplt.ylim([0, 10000] )\n\n# Adding the legend and showing the plot\nplt.legend(['Mersenne Twister','Xorshift 128+', 'SPCG64','Xoroshiro 128+'], loc='upper left')\nplt.grid()\n#plt.show()\nplt.savefig('barchart_compare.png')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#https://www.youtube.com/watch?v=CQ5kc_j4RjA import pandas as pd #import quandl import math, datetime import time import numpy as np from pandas.tools.plotting import scatter_matrix import matplotlib.pyplot as plt from sklearn import cross_validation, preprocessing, svm from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from matplotlib import style style.use ('ggplot') import datetime from pandas_datareader import data import csv #Setting Companies def Set_Ticker(): global stockTicker stockTicker = 'ONGC.NS' ## stockTicker = input("Enter the Ticker: ") print ("Possible options: ONGC.NS, ") return def Set_Date(): #Setting Date global end_date global start_date ## end_date = input("Enter prediction date(YYYY-MM-DD):") end_date = datetime.datetime(2017,1,30) start_date = end_date print (end_date) return def Actual_Value(): #Actual Value global df print("The Actual Closing Value is Displayed below") df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5') ao=df['Close'] print (str(ao)) return def Add_Features_x(): #Create Features - X global df df ['OC_Change'] = (df['Close']-df['Open']/df['Open']*100) df ['HL_Change'] = (df['High']-df['Low']/df['Low']*100) df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']] return def Forcast_Values(): #Forecast global forecast_out global forecast_col forecast_col = 'Close' forecast_out = int(math.ceil(0.01*len(df))) return def Add_Features_y(): #Label - y df['label'] = df[forecast_col].shift(-forecast_out) df.dropna(inplace=True) return def Setup_Validate_data(): #Set X and y global y global X global X_train, X_test, y_train, y_test X = np.array(df.drop(['label'],1)) y = np.array(df['label']) #Split Training and Testing Data X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2) return def Set_Model(): #Set Model for ML global clf clf = LinearRegression() clf.fit(X_train, y_train) return def get_Accuracy(): #Accuracy of Test Data global accuracy accuracy = clf.score(X_test, y_test) return() def Prediction(): #Predict Next Values global X X = X[:-forecast_out] global X_lately global forecast_set X_lately = X[-forecast_out:] forecast_set = clf.predict(X_lately) def Data_frame_Create(): #Creat a DataFrame global df df = data.DataReader(stockTicker, 'yahoo', start_date, end_date) ## df.plot(kind="box", subplots=True, layout=(1,6), sharex=False, sharey=False) ## plt.show() ## df.hist() ## plt.show() ## scatter_matrix(df) ## plt.show() return Set_Ticker() Actual_Value() #Setting Date Set_Date() #Gap of 1 month in time #n = int(input("Enter the No. 
of Years in Months:")) start_date += datetime.timedelta(weeks=-100) #Creat a DataFrame Data_frame_Create() #Create Features - X Add_Features_x() #Forecast Forcast_Values() #Label - y Add_Features_y() #Split Training and Testing Data Setup_Validate_data() #Set Model for ML Set_Model() #Accuracy of Test Data get_Accuracy() #Predict Next Values Prediction() print (stockTicker.partition('.')[0]) ##print ("Start Date:" + str(start_date)) print ("Accuracy: " + str(accuracy*100)) print ("Next day value: "+ str(forecast_set[0])) print (forecast_set) print ("3rd day value: "+ str(forecast_set[1])) print ("5th day value: "+ str(forecast_set[2])) print ("7th day value: "+ str(forecast_set[3])) print ("10th day value: "+ str(forecast_set[4])) ##dict = {'Next Day':forecast_set[0],'3rd Day':forecast_set[1],'5th Day':forecast_set[2]} ##print (dict) somedict = dict(NextDay=forecast_set[0],ThirdDay=forecast_set[1],FifthDay=forecast_set[2]) with open('mycsvfile.csv','wb') as f: w = csv.writer(f) w.writerows(somedict.items())
normal
{ "blob_id": "9c4676edbeef3748a4947f827fefa29e95674bfa", "index": 121, "step-1": "<mask token>\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\n<mask token>\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\n<mask token>\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\n<mask token>\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\n<mask token>\n", "step-2": "<mask token>\nstyle.use('ggplot')\n<mask token>\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\n<mask token>\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n", "step-3": "<mask 
token>\nstyle.use('ggplot')\n<mask token>\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\nsomedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay\n =forecast_set[2])\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n", "step-4": "import pandas as pd\nimport math, datetime\nimport time\nimport numpy as np\nfrom pandas.tools.plotting import scatter_matrix\nimport matplotlib.pyplot as plt\nfrom sklearn import cross_validation, preprocessing, svm\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom matplotlib import style\nstyle.use('ggplot')\nimport datetime\nfrom pandas_datareader import data\nimport csv\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n 
return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\nsomedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay\n =forecast_set[2])\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n", "step-5": "#https://www.youtube.com/watch?v=CQ5kc_j4RjA\r\n\r\nimport pandas as pd\r\n#import quandl\r\nimport math, datetime\r\nimport time\r\nimport numpy as np\r\nfrom pandas.tools.plotting import scatter_matrix\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import cross_validation, preprocessing, svm\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.svm import SVC\r\nfrom matplotlib import style\r\nstyle.use ('ggplot')\r\nimport datetime\r\nfrom pandas_datareader import data\r\nimport csv\r\n\r\n\r\n#Setting Companies\r\ndef Set_Ticker():\r\n global stockTicker\r\n stockTicker = 'ONGC.NS'\r\n## stockTicker = input(\"Enter the Ticker: \")\r\n print (\"Possible options: ONGC.NS, \")\r\n return \r\n\r\ndef Set_Date():\r\n #Setting Date\r\n global end_date\r\n global start_date\r\n## end_date = input(\"Enter prediction date(YYYY-MM-DD):\")\r\n end_date = datetime.datetime(2017,1,30)\r\n start_date = end_date\r\n print (end_date)\r\n return\r\n\r\n\r\ndef 
Actual_Value():\r\n #Actual Value\r\n global df\r\n print(\"The Actual Closing Value is Displayed below\")\r\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\r\n ao=df['Close']\r\n print (str(ao))\r\n return\r\n\r\n\r\ndef Add_Features_x():\r\n #Create Features - X\r\n global df\r\n df ['OC_Change'] = (df['Close']-df['Open']/df['Open']*100)\r\n df ['HL_Change'] = (df['High']-df['Low']/df['Low']*100)\r\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\r\n return\r\n\r\ndef Forcast_Values():\r\n #Forecast\r\n global forecast_out\r\n global forecast_col\r\n forecast_col = 'Close'\r\n forecast_out = int(math.ceil(0.01*len(df)))\r\n return\r\n\r\ndef Add_Features_y():\r\n #Label - y\r\n df['label'] = df[forecast_col].shift(-forecast_out)\r\n df.dropna(inplace=True)\r\n return\r\n\r\ndef Setup_Validate_data():\r\n #Set X and y \r\n global y\r\n global X\r\n global X_train, X_test, y_train, y_test\r\n X = np.array(df.drop(['label'],1))\r\n y = np.array(df['label'])\r\n #Split Training and Testing Data\r\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)\r\n return\r\n\r\ndef Set_Model():\r\n #Set Model for ML\r\n global clf\r\n clf = LinearRegression()\r\n clf.fit(X_train, y_train)\r\n return\r\n\r\ndef get_Accuracy():\r\n #Accuracy of Test Data\r\n global accuracy\r\n accuracy = clf.score(X_test, y_test)\r\n return()\r\n\r\ndef Prediction():\r\n #Predict Next Values\r\n global X\r\n X = X[:-forecast_out]\r\n global X_lately\r\n global forecast_set\r\n X_lately = X[-forecast_out:]\r\n forecast_set = clf.predict(X_lately)\r\n\r\ndef Data_frame_Create():\r\n #Creat a DataFrame \r\n global df\r\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\r\n## df.plot(kind=\"box\", subplots=True, layout=(1,6), sharex=False, sharey=False)\r\n## plt.show()\r\n## df.hist()\r\n## plt.show()\r\n## scatter_matrix(df)\r\n## plt.show()\r\n return\r\n\r\n\r\nSet_Ticker()\r\nActual_Value()\r\n\r\n#Setting Date\r\nSet_Date()\r\n\r\n#Gap of 1 month in time\r\n#n = int(input(\"Enter the No. of Years in Months:\"))\r\nstart_date += datetime.timedelta(weeks=-100)\r\n\r\n#Creat a DataFrame\r\nData_frame_Create() \r\n\r\n#Create Features - X\r\nAdd_Features_x()\r\n\r\n#Forecast\r\nForcast_Values()\r\n\r\n#Label - y\r\nAdd_Features_y()\r\n\r\n#Split Training and Testing Data\r\nSetup_Validate_data()\r\n\r\n#Set Model for ML\r\nSet_Model()\r\n\r\n#Accuracy of Test Data\r\nget_Accuracy()\r\n\r\n#Predict Next Values\r\nPrediction()\r\n \r\nprint (stockTicker.partition('.')[0])\r\n##print (\"Start Date:\" + str(start_date))\r\nprint (\"Accuracy: \" + str(accuracy*100))\r\nprint (\"Next day value: \"+ str(forecast_set[0]))\r\nprint (forecast_set)\r\nprint (\"3rd day value: \"+ str(forecast_set[1]))\r\nprint (\"5th day value: \"+ str(forecast_set[2]))\r\nprint (\"7th day value: \"+ str(forecast_set[3]))\r\nprint (\"10th day value: \"+ str(forecast_set[4]))\r\n\r\n##dict = {'Next Day':forecast_set[0],'3rd Day':forecast_set[1],'5th Day':forecast_set[2]}\r\n##print (dict)\r\n\r\nsomedict = dict(NextDay=forecast_set[0],ThirdDay=forecast_set[1],FifthDay=forecast_set[2])\r\n\r\nwith open('mycsvfile.csv','wb') as f:\r\n w = csv.writer(f)\r\n w.writerows(somedict.items())\r\n", "step-ids": [ 5, 12, 13, 14, 15 ] }
[ 5, 12, 13, 14, 15 ]
import numpy as np import matplotlib.pyplot as plt import pandas as pd dataset = pd.read_csv('Position_Salaries.csv') X = dataset.iloc[:, 1:-1].values y = dataset.iloc[:, dataset.shape[1]-1].values #Fitting the Decision Tree Regression from sklearn.tree import DecisionTreeRegressor regressor = DecisionTreeRegressor(random_state = 0) regressor.fit(X, y) #Predicting a new result y_pred = regressor.predict(np.reshape([6.5], (-1, 1))) #Visualizing the results X_grid = np.arange(min(X), max(X), 0.1) X_grid = X_grid.reshape((len(X_grid), 1)) plt.scatter(X, y, color = 'red') plt.plot(X_grid, regressor.predict(X_grid), color = 'blue') plt.scatter(6.5, y_pred, color = 'green') plt.title('Salary vs Title') plt.xlabel('Title') plt.ylabel('Salary') plt.show()
normal
{ "blob_id": "c8565e1b5659dd0908aabf91e07738a798dc3232", "index": 1366, "step-1": "<mask token>\n", "step-2": "<mask token>\nregressor.fit(X, y)\n<mask token>\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n", "step-3": "<mask token>\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1] - 1].values\n<mask token>\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(X, y)\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n", "step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1] - 1].values\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(X, y)\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n", "step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1]-1].values\n\n#Fitting the Decision Tree Regression\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)\n\n#Predicting a new result\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\n\n#Visualizing the results\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.scatter(6.5, y_pred, color = 'green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import sys import bisect t = int(raw_input()) for i in xrange(1, t+1): n, k = map(int, raw_input().strip().split()) s = [n] for j in xrange(k): num = s.pop() if num % 2 != 0: ls = num/2 lr = num/2 if ls != 0: bisect.insort_left(s,ls) bisect.insort_left(s,lr) else: ls = num/2 -1 lr = num/2 if ls != 0: bisect.insort_left(s,ls) bisect.insort_left(s,lr) else: bisect.insort_left(s,lr) print "Case #{}: {} {}".format(i, lr, ls)
normal
{ "blob_id": "488c111c051796b481794678cb04108fcf11ac39", "index": 5778, "step-1": "import sys\nimport bisect\n\nt = int(raw_input())\n\nfor i in xrange(1, t+1):\n n, k = map(int, raw_input().strip().split())\n s = [n]\n for j in xrange(k):\n num = s.pop()\n if num % 2 != 0:\n ls = num/2\n lr = num/2\n if ls != 0:\n bisect.insort_left(s,ls)\n bisect.insort_left(s,lr)\n else:\n ls = num/2 -1\n lr = num/2\n if ls != 0:\n bisect.insort_left(s,ls)\n bisect.insort_left(s,lr)\n else:\n bisect.insort_left(s,lr) \n \n print \"Case #{}: {} {}\".format(i, lr, ls)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from knox.models import AuthToken from rest_framework import generics, permissions, status from rest_framework.response import Response from accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer # Register API class RegisterAPI(generics.CreateAPIView): permission_classes = [ permissions.AllowAny ] serializer_class = RegisterSerializer def post(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) user = serializer.save() return Response({ "user": UserSerializer(user, context=self.get_serializer_context()).data, "token": AuthToken.objects.create(user)[1] }) # Login API class LoginAPI(generics.GenericAPIView): serializer_class = LoginSerializer def post(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) user = serializer.validated_data return Response({ "user": UserSerializer(user, context=self.get_serializer_context()).data, "token": AuthToken.objects.create(user)[1] }) class ChangePasswordAPI(generics.UpdateAPIView): permission_classes = [ permissions.IsAuthenticated ] serializer_class = ChangePasswordSerializer def update(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) user = request.user user.set_password(serializer.validated_data['new_password']) user.save() return Response({ 'success': True, }, status=status.HTTP_200_OK) # Get User API class UserAPI(generics.RetrieveUpdateAPIView): permission_classes = [ permissions.IsAuthenticated, ] serializer_class = UserSerializer def get_object(self): return self.request.user def update(self, request, *args, **kwargs): user = self.get_object() first_name = request.data.get('first_name') last_name = request.data.get('last_name') mobile = request.data.get('mobile') print(first_name, last_name, mobile) user.first_name = first_name user.last_name = last_name user.mobile = mobile user.save() return Response({ "success": False }, status=status.HTTP_200_OK)
normal
{ "blob_id": "5d6ec1b23dcbc935fe80dd09a2e967eb7e37a363", "index": 5645, "step-1": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n <mask token>\n <mask token>\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n", "step-2": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n <mask token>\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n", "step-3": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return 
Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n", "step-4": "from knox.models import AuthToken\nfrom rest_framework import generics, permissions, status\nfrom rest_framework.response import Response\nfrom accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer\n\n\nclass RegisterAPI(generics.CreateAPIView):\n permission_classes = [permissions.AllowAny]\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n", "step-5": "from knox.models import AuthToken\nfrom rest_framework import generics, permissions, status\nfrom rest_framework.response import Response\n\nfrom accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer\n\n\n# Register API\n\nclass RegisterAPI(generics.CreateAPIView):\n permission_classes = [\n permissions.AllowAny\n ]\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({\n \"user\": UserSerializer(user, context=self.get_serializer_context()).data,\n \"token\": 
AuthToken.objects.create(user)[1]\n })\n\n\n# Login API\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({\n \"user\": UserSerializer(user, context=self.get_serializer_context()).data,\n \"token\": AuthToken.objects.create(user)[1]\n })\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [\n permissions.IsAuthenticated\n ]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({\n 'success': True,\n }, status=status.HTTP_200_OK)\n\n\n# Get User API\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [\n permissions.IsAuthenticated,\n ]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n\n print(first_name, last_name, mobile)\n\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n\n return Response({\n \"success\": False\n }, status=status.HTTP_200_OK)\n", "step-ids": [ 8, 9, 10, 14, 15 ] }
[ 8, 9, 10, 14, 15 ]
# Generated by Django 3.2 on 2021-04-21 13:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('rate', '0003_auto_20210421_1316'), ] operations = [ migrations.AlterField( model_name='song', name='overall_rating', field=models.FloatField(default=0), ), migrations.AlterField( model_name='song', name='rating_count', field=models.FloatField(default=0), ), ]
normal
{ "blob_id": "d46cda5354640e1c87432d39a2e949d6db034edc", "index": 6413, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rate', '0003_auto_20210421_1316')]\n operations = [migrations.AlterField(model_name='song', name=\n 'overall_rating', field=models.FloatField(default=0)), migrations.\n AlterField(model_name='song', name='rating_count', field=models.\n FloatField(default=0))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rate', '0003_auto_20210421_1316')]\n operations = [migrations.AlterField(model_name='song', name=\n 'overall_rating', field=models.FloatField(default=0)), migrations.\n AlterField(model_name='song', name='rating_count', field=models.\n FloatField(default=0))]\n", "step-5": "# Generated by Django 3.2 on 2021-04-21 13:21\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rate', '0003_auto_20210421_1316'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='song',\n name='overall_rating',\n field=models.FloatField(default=0),\n ),\n migrations.AlterField(\n model_name='song',\n name='rating_count',\n field=models.FloatField(default=0),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os import time #if __name__ == "__main__": # os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml ; echo press RETURN to close this window ; read" &') # delete the echo and the read to don't stop the process and make it run quickly # os.system('xterm -e "pwd ; ./start.sh ; echo press RETURN to close this window ; read" &') def train_once(): os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml " &') # delete the echo and the read to don't stop the process and make it run quickly os.system('xterm -e "pwd ; ./start.sh " &') return True
normal
{ "blob_id": "c2cf74893c7f7515a95141bb10be6a446b45a0cc", "index": 1447, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef train_once():\n os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml \" &')\n os.system('xterm -e \"pwd ; ./start.sh \" &')\n return True\n", "step-3": "import os\nimport time\n\n\ndef train_once():\n os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml \" &')\n os.system('xterm -e \"pwd ; ./start.sh \" &')\n return True\n", "step-4": "import os\nimport time\n#if __name__ == \"__main__\":\n# os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml ; echo press RETURN to close this window ; read\" &') # delete the echo and the read to don't stop the process and make it run quickly\n# os.system('xterm -e \"pwd ; ./start.sh ; echo press RETURN to close this window ; read\" &')\n\ndef train_once():\n os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml \" &') # delete the echo and the read to don't stop the process and make it run quickly\n os.system('xterm -e \"pwd ; ./start.sh \" &')\n return True\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def myswap(a, b):
    # Swap the two values through a temporary variable,
    # then print them in ascending order.
    temp = a
    a = b
    b = temp
    if a < b:
        print(a, b)
    else:
        print(b, a)


# Read two integers from one line of input and pass them to myswap.
a, b = map(int, input().split())
myswap(a, b)
normal
{ "blob_id": "e6efd2de5f92d66f1b734a2173fc8681af3c4cc8", "index": 8040, "step-1": "<mask token>\n", "step-2": "def myswap(a, b):\n temp = a\n a = b\n b = temp\n if a < b:\n print(a, b)\n else:\n print(b, a)\n\n\n<mask token>\n", "step-3": "def myswap(a, b):\n temp = a\n a = b\n b = temp\n if a < b:\n print(a, b)\n else:\n print(b, a)\n\n\n<mask token>\nmyswap(a, b)\n", "step-4": "def myswap(a, b):\n temp = a\n a = b\n b = temp\n if a < b:\n print(a, b)\n else:\n print(b, a)\n\n\na, b = map(int, input().split())\nmyswap(a, b)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
""" db.集合.update() """ """ 实例 被替换了 > db.test1000.update({'name':'dapeng'},{'name':'大鹏'}) WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 }) > db.test1000.find() { "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" } { "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 } { "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "kongming", "age" : 12 } { "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 } { "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 } > """ """ 实例2 利用$set:只修改匹配到的值 > db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}}) WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 }) > db.test1000.find() { "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" } { "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 } { "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "空明被修改", "age" : 12 } { "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 } { "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 } > """ """ 实例3 修改多条 db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}},{multi:true}) """
normal
{ "blob_id": "7d8c2aa5674704d4443034c29bbdc715da9fd567", "index": 5022, "step-1": "<mask token>\n", "step-2": "\"\"\"\ndb.集合.update()\n\n\"\"\"\n\"\"\"\n实例 被替换了\n> db.test1000.update({'name':'dapeng'},{'name':'大鹏'})\nWriteResult({ \"nMatched\" : 1, \"nUpserted\" : 0, \"nModified\" : 1 })\n> db.test1000.find()\n{ \"_id\" : ObjectId(\"5c35549d7ad0cf935d3c150d\"), \"name\" : \"大鹏\" }\n{ \"_id\" : ObjectId(\"5c3554f37ad0cf935d3c150e\"), \"nInserted\" : 1 }\n{ \"_id\" : ObjectId(\"5c3555417ad0cf935d3c150f\"), \"name\" : \"kongming\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555457ad0cf935d3c1510\"), \"name\" : \"kongming1\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555557ad0cf935d3c1511\"), \"name\" : \"kongming1\", \"age\" : 12 }\n> \n\"\"\"\n\n\"\"\"\n实例2 利用$set:只修改匹配到的值\n> db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}})\nWriteResult({ \"nMatched\" : 1, \"nUpserted\" : 0, \"nModified\" : 1 })\n> db.test1000.find()\n{ \"_id\" : ObjectId(\"5c35549d7ad0cf935d3c150d\"), \"name\" : \"大鹏\" }\n{ \"_id\" : ObjectId(\"5c3554f37ad0cf935d3c150e\"), \"nInserted\" : 1 }\n{ \"_id\" : ObjectId(\"5c3555417ad0cf935d3c150f\"), \"name\" : \"空明被修改\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555457ad0cf935d3c1510\"), \"name\" : \"kongming1\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555557ad0cf935d3c1511\"), \"name\" : \"kongming1\", \"age\" : 12 }\n> \n\n\"\"\"\n\"\"\"\n实例3 修改多条\ndb.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}},{multi:true})\n\"\"\"", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
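The mongo-shell transcripts above contrast whole-document replacement, a $set update, and a multi-document update. Since the rest of these samples are Python, a minimal PyMongo sketch of the same three operations is given below; the connection URI and database name are assumptions for illustration, and only the collection name (test1000) and the filter/values come from the transcripts.

# Minimal PyMongo sketch of the shell examples above (assumed local server and database name).
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed connection string
coll = client["test"]["test1000"]                  # assumed database name; collection from the transcripts

# Example 1: supplying a plain document replaces the whole matched document.
coll.replace_one({"name": "dapeng"}, {"name": "大鹏"})

# Example 2: $set changes only the listed fields of the first matched document.
coll.update_one({"name": "kongming"}, {"$set": {"name": "空明被修改"}})

# Example 3: update_many is the PyMongo counterpart of passing {multi: true} in the shell.
coll.update_many({"name": "kongming"}, {"$set": {"name": "空明被修改"}})

The split into replace_one / update_one / update_many mirrors how the shell's single update() call behaves depending on whether the update document uses operators and whether multi is set.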
from django.urls import reverse_lazy
from django.views.generic import CreateView, edit, ListView
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from users.forms import CustomUserCreationForm, LoginForm
from users.models import CustomUser as Users


class SignUpView(CreateView):
    form_class = CustomUserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'users/signup.html'


class IndexView(edit.FormView):
    success_url = '/facilities'
    form_class = LoginForm
    template_name = 'users/index.html'

    def form_valid(self, form):
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        user = authenticate(self.request, username=username, password=password)
        if user is not None:
            login(self.request, user)
        return super().form_valid(form)
normal
{ "blob_id": "6bd9c8e38373e696193c146b88ebf6601170cf0e", "index": 9549, "step-1": "<mask token>\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n", "step-2": "<mask token>\n\n\nclass SignUpView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n", "step-3": "<mask token>\n\n\nclass SignUpView(CreateView):\n form_class = CustomUserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'users/signup.html'\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n", "step-4": "from django.urls import reverse_lazy\nfrom django.views.generic import CreateView, edit, ListView\nfrom django.shortcuts import render\nfrom django.contrib.auth import authenticate, login\nfrom users.forms import CustomUserCreationForm, LoginForm\nfrom users.models import CustomUser as Users\n\n\nclass SignUpView(CreateView):\n form_class = CustomUserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'users/signup.html'\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n", "step-5": null, "step-ids": [ 3, 4, 5, 6 ] }
[ 3, 4, 5, 6 ]
from packet import Packet
from packetConstructor import PacketConstructor
import threading
import time


class PacketSender:
    """
    Packet represents a simulated UDP packet.
    """
    # The next seq num for sent packets
    seq_num = 0
    # The next seq num for acks that we're waiting for
    next_seq_num = 0
    sent_packets = 0
    acked_packets = []
    acked_all_packets = False
    acked_packets_lock = threading.Lock()
    was_reset = False

    def reset(self):
        global seq_num
        global sent_packets
        global next_seq_num
        global acked_packets
        global acked_all_packets
        global acked_packets_lock
        seq_num = 0
        sent_packets = 0
        next_seq_num = 0
        acked_packets = []
        acked_all_packets = False
        acked_packets_lock = threading.Lock()

    def handle_ack(data):
        global acked_packets
        global seq_num
        global acked_all_packets
        global acked_packets_lock
        p = Packet.from_bytes(data)
        if not p.packet_type == PacketConstructor.ack_type:
            # TODO: handle NAKs here
            return
        print("received ack " + str(p.seq_num))
        acked_packets_lock.acquire()
        if p.seq_num not in acked_packets:
            print("it's a new ack")
            acked_packets.append(p.seq_num)
            if len(acked_packets) == seq_num:
                print("got all acks")
                acked_all_packets = True
            else:
                print("len: " + str(len(acked_packets)))
                print("seq_num: " + str(seq_num))
        acked_packets_lock.release()

    def await_acks(conn):
        print("awaiting acks")
        while not PacketSender.acked_all_packets:
            data, sender = conn.recvfrom(1024)
            threading.Thread(target=PacketSender.handle_ack, args=(data,)).start()

    def resend_packet_if_needed(conn, packet, destination):
        while not packet.seq_num in PacketSender.acked_packets and not PacketSender.was_reset:
            print("starting resend loop")
            time.sleep(0.5)
            acked_packets_lock.acquire()
            if not packet.seq_num in PacketSender.acked_packets and not PacketSender.was_reset:
                print("resending packet " + str(packet.seq_num))
                conn.sendto(packet.to_bytes(), destination)
            acked_packets_lock.release()

    def spawn_resend_thread(conn, packet, destination):
        threading.Thread(target=PacketSender.resend_packet_if_needed,
                         args=(conn, packet, destination)).start()

    @staticmethod
    def send_as_packets(data, conn, destination, peer_ip, peer_port):
        global sent_packets
        global acked_packets
        global next_seq_num
        global acked_all_packets
        global seq_num
        PacketSender.reset()
        max_payload_length = Packet.MAX_LEN - Packet.MIN_LEN

        curr = [0, 0]

        def nbytes(n):
            curr[0], curr[1] = curr[1], curr[1] + n
            return data[curr[0]: curr[1]]

        remaining_data = len(data)
        if remaining_data > 0:
            threading.Thread(target=PacketSender.await_acks, args=(conn,)).start()
        # While there's still data to be sent
        while remaining_data > 0:
            # While there are less packets in transit than the window size
            while (sent_packets < PacketConstructor.window_size and remaining_data > 0):
                print("sending packet %d" % seq_num)
                if remaining_data > max_payload_length:
                    p = Packet(packet_type=PacketConstructor.data_type,
                               seq_num=seq_num,
                               peer_ip_addr=peer_ip,
                               peer_port=peer_port,
                               is_last_packet=False,
                               payload=nbytes(max_payload_length))

                    conn.sendto(p.to_bytes(), destination)
                    sent_packets += 1
                    remaining_data -= max_payload_length
                    seq_num += 1
                    PacketSender.spawn_resend_thread(conn, p, destination)
                    print("not last packet")
                else:
                    p = Packet(packet_type=PacketConstructor.data_type,
                               seq_num=seq_num,
                               peer_ip_addr=peer_ip,
                               peer_port=peer_port,
                               is_last_packet=True,
                               payload=nbytes(remaining_data))

                    conn.sendto(p.to_bytes(), destination)
                    sent_packets += 1
                    remaining_data -= remaining_data
                    seq_num += 1
                    print("remaining data " + str(remaining_data))
                    print("is last packet")
                    PacketSender.spawn_resend_thread(conn, p, destination)
                # Update the number of packets still in transit
                while next_seq_num in acked_packets:
                    next_seq_num += 1
                    sent_packets -= 1
        print("Waiting for acks")
        while not acked_all_packets:
            # Wait here until all packets have been acked
            pass
        print("RECEIVED ALL ACKS")
        PacketSender.was_reset = True
normal
{ "blob_id": "47c1ad4bd1ceffa38eef467ea8eb59dbd2fc2ebb", "index": 262, "step-1": "<mask token>\n\n\nclass PacketSender:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def await_acks(conn):\n print('awaiting acks')\n while not PacketSender.acked_all_packets:\n data, sender = conn.recvfrom(1024)\n threading.Thread(target=PacketSender.handle_ack, args=(data,)\n ).start()\n <mask token>\n <mask token>\n\n @staticmethod\n def send_as_packets(data, conn, destination, peer_ip, peer_port):\n global sent_packets\n global acked_packets\n global next_seq_num\n global acked_all_packets\n global seq_num\n PacketSender.reset()\n max_payload_length = Packet.MAX_LEN - Packet.MIN_LEN\n curr = [0, 0]\n\n def nbytes(n):\n curr[0], curr[1] = curr[1], curr[1] + n\n return data[curr[0]:curr[1]]\n remaining_data = len(data)\n if remaining_data > 0:\n threading.Thread(target=PacketSender.await_acks, args=(conn,)\n ).start()\n while remaining_data > 0:\n while (sent_packets < PacketConstructor.window_size and \n remaining_data > 0):\n print('sending packet %d' % seq_num)\n if remaining_data > max_payload_length:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=False, payload=nbytes(\n max_payload_length))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= max_payload_length\n seq_num += 1\n PacketSender.spawn_resend_thread(conn, p, destination)\n print('not last packet')\n else:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=True, payload=nbytes(\n remaining_data))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= remaining_data\n seq_num += 1\n print('remaining data ' + str(remaining_data))\n print('is last packet')\n PacketSender.spawn_resend_thread(conn, p, destination)\n while next_seq_num in acked_packets:\n next_seq_num += 1\n sent_packets -= 1\n print('Waiting for acks')\n while not acked_all_packets:\n pass\n print('RECEIVED ALL ACKS')\n PacketSender.was_reset = True\n", "step-2": "<mask token>\n\n\nclass PacketSender:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def reset(self):\n global seq_num\n global sent_packets\n global next_seq_num\n global acked_packets\n global acked_all_packets\n global acked_packets_lock\n seq_num = 0\n sent_packets = 0\n next_seq_num = 0\n acked_packets = []\n acked_all_packets = False\n acked_packets_lock = threading.Lock()\n\n def handle_ack(data):\n global acked_packets\n global seq_num\n global acked_all_packets\n global acked_packets_lock\n p = Packet.from_bytes(data)\n if not p.packet_type == PacketConstructor.ack_type:\n return\n print('received ack ' + str(p.seq_num))\n acked_packets_lock.acquire()\n if p.seq_num not in acked_packets:\n print(\"it's a new ack\")\n acked_packets.append(p.seq_num)\n if len(acked_packets) == seq_num:\n print('got all acks')\n acked_all_packets = True\n else:\n print('len: ' + str(len(acked_packets)))\n print('seq_num: ' + str(seq_num))\n acked_packets_lock.release()\n\n def await_acks(conn):\n print('awaiting acks')\n while not PacketSender.acked_all_packets:\n data, sender = conn.recvfrom(1024)\n threading.Thread(target=PacketSender.handle_ack, args=(data,)\n ).start()\n <mask token>\n <mask token>\n\n @staticmethod\n def 
send_as_packets(data, conn, destination, peer_ip, peer_port):\n global sent_packets\n global acked_packets\n global next_seq_num\n global acked_all_packets\n global seq_num\n PacketSender.reset()\n max_payload_length = Packet.MAX_LEN - Packet.MIN_LEN\n curr = [0, 0]\n\n def nbytes(n):\n curr[0], curr[1] = curr[1], curr[1] + n\n return data[curr[0]:curr[1]]\n remaining_data = len(data)\n if remaining_data > 0:\n threading.Thread(target=PacketSender.await_acks, args=(conn,)\n ).start()\n while remaining_data > 0:\n while (sent_packets < PacketConstructor.window_size and \n remaining_data > 0):\n print('sending packet %d' % seq_num)\n if remaining_data > max_payload_length:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=False, payload=nbytes(\n max_payload_length))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= max_payload_length\n seq_num += 1\n PacketSender.spawn_resend_thread(conn, p, destination)\n print('not last packet')\n else:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=True, payload=nbytes(\n remaining_data))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= remaining_data\n seq_num += 1\n print('remaining data ' + str(remaining_data))\n print('is last packet')\n PacketSender.spawn_resend_thread(conn, p, destination)\n while next_seq_num in acked_packets:\n next_seq_num += 1\n sent_packets -= 1\n print('Waiting for acks')\n while not acked_all_packets:\n pass\n print('RECEIVED ALL ACKS')\n PacketSender.was_reset = True\n", "step-3": "<mask token>\n\n\nclass PacketSender:\n \"\"\"\n Packet represents a simulated UDP packet.\n \"\"\"\n seq_num = 0\n next_seq_num = 0\n sent_packets = 0\n acked_packets = []\n acked_all_packets = False\n acked_packets_lock = threading.Lock()\n was_reset = False\n\n def reset(self):\n global seq_num\n global sent_packets\n global next_seq_num\n global acked_packets\n global acked_all_packets\n global acked_packets_lock\n seq_num = 0\n sent_packets = 0\n next_seq_num = 0\n acked_packets = []\n acked_all_packets = False\n acked_packets_lock = threading.Lock()\n\n def handle_ack(data):\n global acked_packets\n global seq_num\n global acked_all_packets\n global acked_packets_lock\n p = Packet.from_bytes(data)\n if not p.packet_type == PacketConstructor.ack_type:\n return\n print('received ack ' + str(p.seq_num))\n acked_packets_lock.acquire()\n if p.seq_num not in acked_packets:\n print(\"it's a new ack\")\n acked_packets.append(p.seq_num)\n if len(acked_packets) == seq_num:\n print('got all acks')\n acked_all_packets = True\n else:\n print('len: ' + str(len(acked_packets)))\n print('seq_num: ' + str(seq_num))\n acked_packets_lock.release()\n\n def await_acks(conn):\n print('awaiting acks')\n while not PacketSender.acked_all_packets:\n data, sender = conn.recvfrom(1024)\n threading.Thread(target=PacketSender.handle_ack, args=(data,)\n ).start()\n\n def resend_packet_if_needed(conn, packet, destination):\n while (not packet.seq_num in PacketSender.acked_packets and not\n PacketSender.was_reset):\n print('starting resend loop')\n time.sleep(0.5)\n acked_packets_lock.acquire()\n if (not packet.seq_num in PacketSender.acked_packets and not\n PacketSender.was_reset):\n print('resending packet ' + str(packet.seq_num))\n conn.sendto(packet.to_bytes(), destination)\n acked_packets_lock.release()\n\n def spawn_resend_thread(conn, 
packet, destination):\n threading.Thread(target=PacketSender.resend_packet_if_needed, args=\n (conn, packet, destination)).start()\n\n @staticmethod\n def send_as_packets(data, conn, destination, peer_ip, peer_port):\n global sent_packets\n global acked_packets\n global next_seq_num\n global acked_all_packets\n global seq_num\n PacketSender.reset()\n max_payload_length = Packet.MAX_LEN - Packet.MIN_LEN\n curr = [0, 0]\n\n def nbytes(n):\n curr[0], curr[1] = curr[1], curr[1] + n\n return data[curr[0]:curr[1]]\n remaining_data = len(data)\n if remaining_data > 0:\n threading.Thread(target=PacketSender.await_acks, args=(conn,)\n ).start()\n while remaining_data > 0:\n while (sent_packets < PacketConstructor.window_size and \n remaining_data > 0):\n print('sending packet %d' % seq_num)\n if remaining_data > max_payload_length:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=False, payload=nbytes(\n max_payload_length))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= max_payload_length\n seq_num += 1\n PacketSender.spawn_resend_thread(conn, p, destination)\n print('not last packet')\n else:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=True, payload=nbytes(\n remaining_data))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= remaining_data\n seq_num += 1\n print('remaining data ' + str(remaining_data))\n print('is last packet')\n PacketSender.spawn_resend_thread(conn, p, destination)\n while next_seq_num in acked_packets:\n next_seq_num += 1\n sent_packets -= 1\n print('Waiting for acks')\n while not acked_all_packets:\n pass\n print('RECEIVED ALL ACKS')\n PacketSender.was_reset = True\n", "step-4": "from packet import Packet\nfrom packetConstructor import PacketConstructor\nimport threading\nimport time\n\n\nclass PacketSender:\n \"\"\"\n Packet represents a simulated UDP packet.\n \"\"\"\n seq_num = 0\n next_seq_num = 0\n sent_packets = 0\n acked_packets = []\n acked_all_packets = False\n acked_packets_lock = threading.Lock()\n was_reset = False\n\n def reset(self):\n global seq_num\n global sent_packets\n global next_seq_num\n global acked_packets\n global acked_all_packets\n global acked_packets_lock\n seq_num = 0\n sent_packets = 0\n next_seq_num = 0\n acked_packets = []\n acked_all_packets = False\n acked_packets_lock = threading.Lock()\n\n def handle_ack(data):\n global acked_packets\n global seq_num\n global acked_all_packets\n global acked_packets_lock\n p = Packet.from_bytes(data)\n if not p.packet_type == PacketConstructor.ack_type:\n return\n print('received ack ' + str(p.seq_num))\n acked_packets_lock.acquire()\n if p.seq_num not in acked_packets:\n print(\"it's a new ack\")\n acked_packets.append(p.seq_num)\n if len(acked_packets) == seq_num:\n print('got all acks')\n acked_all_packets = True\n else:\n print('len: ' + str(len(acked_packets)))\n print('seq_num: ' + str(seq_num))\n acked_packets_lock.release()\n\n def await_acks(conn):\n print('awaiting acks')\n while not PacketSender.acked_all_packets:\n data, sender = conn.recvfrom(1024)\n threading.Thread(target=PacketSender.handle_ack, args=(data,)\n ).start()\n\n def resend_packet_if_needed(conn, packet, destination):\n while (not packet.seq_num in PacketSender.acked_packets and not\n PacketSender.was_reset):\n print('starting resend loop')\n time.sleep(0.5)\n acked_packets_lock.acquire()\n if 
(not packet.seq_num in PacketSender.acked_packets and not\n PacketSender.was_reset):\n print('resending packet ' + str(packet.seq_num))\n conn.sendto(packet.to_bytes(), destination)\n acked_packets_lock.release()\n\n def spawn_resend_thread(conn, packet, destination):\n threading.Thread(target=PacketSender.resend_packet_if_needed, args=\n (conn, packet, destination)).start()\n\n @staticmethod\n def send_as_packets(data, conn, destination, peer_ip, peer_port):\n global sent_packets\n global acked_packets\n global next_seq_num\n global acked_all_packets\n global seq_num\n PacketSender.reset()\n max_payload_length = Packet.MAX_LEN - Packet.MIN_LEN\n curr = [0, 0]\n\n def nbytes(n):\n curr[0], curr[1] = curr[1], curr[1] + n\n return data[curr[0]:curr[1]]\n remaining_data = len(data)\n if remaining_data > 0:\n threading.Thread(target=PacketSender.await_acks, args=(conn,)\n ).start()\n while remaining_data > 0:\n while (sent_packets < PacketConstructor.window_size and \n remaining_data > 0):\n print('sending packet %d' % seq_num)\n if remaining_data > max_payload_length:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=False, payload=nbytes(\n max_payload_length))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= max_payload_length\n seq_num += 1\n PacketSender.spawn_resend_thread(conn, p, destination)\n print('not last packet')\n else:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num, peer_ip_addr=peer_ip, peer_port=\n peer_port, is_last_packet=True, payload=nbytes(\n remaining_data))\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= remaining_data\n seq_num += 1\n print('remaining data ' + str(remaining_data))\n print('is last packet')\n PacketSender.spawn_resend_thread(conn, p, destination)\n while next_seq_num in acked_packets:\n next_seq_num += 1\n sent_packets -= 1\n print('Waiting for acks')\n while not acked_all_packets:\n pass\n print('RECEIVED ALL ACKS')\n PacketSender.was_reset = True\n", "step-5": "from packet import Packet\nfrom packetConstructor import PacketConstructor\nimport threading\nimport time\n\n\nclass PacketSender:\n \"\"\"\n Packet represents a simulated UDP packet.\n \"\"\"\n # The next seq num for sent packets\n seq_num = 0\n # The next seq num for acks that we're waiting for\n next_seq_num = 0\n sent_packets = 0\n acked_packets = []\n acked_all_packets = False\n acked_packets_lock = threading.Lock()\n was_reset = False\n\n def reset(self):\n global seq_num\n global sent_packets\n global next_seq_num\n global acked_packets\n global acked_all_packets\n global acked_packets_lock\n seq_num = 0\n sent_packets = 0\n next_seq_num = 0\n acked_packets = []\n acked_all_packets = False\n acked_packets_lock = threading.Lock()\n\n def handle_ack(data):\n global acked_packets\n global seq_num\n global acked_all_packets\n global acked_packets_lock\n p = Packet.from_bytes(data)\n if not p.packet_type == PacketConstructor.ack_type:\n # TODO: handle NAKs here\n return\n print(\"received ack \" + str(p.seq_num))\n acked_packets_lock.acquire()\n if p.seq_num not in acked_packets:\n print(\"it's a new ack\")\n acked_packets.append(p.seq_num)\n if len(acked_packets) == seq_num:\n print(\"got all acks\")\n acked_all_packets = True\n else:\n print(\"len: \" + str(len(acked_packets)))\n print(\"seq_num: \" + str(seq_num))\n acked_packets_lock.release()\n\n def await_acks(conn):\n print(\"awaiting acks\")\n while not 
PacketSender.acked_all_packets:\n data, sender = conn.recvfrom(1024)\n threading.Thread(target=PacketSender.handle_ack, args=(data,)).start()\n\n def resend_packet_if_needed(conn, packet, destination):\n while not packet.seq_num in PacketSender.acked_packets and not PacketSender.was_reset:\n print(\"starting resend loop\")\n time.sleep(0.5)\n acked_packets_lock.acquire()\n if not packet.seq_num in PacketSender.acked_packets and not PacketSender.was_reset:\n print(\"resending packet \" + str(packet.seq_num))\n conn.sendto(packet.to_bytes(), destination)\n acked_packets_lock.release()\n\n def spawn_resend_thread(conn, packet, destination):\n threading.Thread(target=PacketSender.resend_packet_if_needed, args=(conn, packet, destination)).start()\n\n @staticmethod\n def send_as_packets(data, conn, destination, peer_ip, peer_port):\n global sent_packets\n global acked_packets\n global next_seq_num\n global acked_all_packets\n global seq_num\n PacketSender.reset()\n max_payload_length = Packet.MAX_LEN - Packet.MIN_LEN\n\n curr = [0, 0]\n\n def nbytes(n):\n curr[0], curr[1] = curr[1], curr[1] + n\n return data[curr[0]: curr[1]]\n\n remaining_data = len(data)\n if remaining_data > 0:\n threading.Thread(target=PacketSender.await_acks, args=(conn,)).start()\n # While there's still data to be sent\n while remaining_data > 0:\n # While there are less packets in transit than the window size\n while (sent_packets < PacketConstructor.window_size and remaining_data > 0):\n print(\"sending packet %d\" % seq_num)\n if remaining_data > max_payload_length:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num,\n peer_ip_addr=peer_ip,\n peer_port=peer_port,\n is_last_packet=False,\n payload=nbytes(max_payload_length))\n\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= max_payload_length\n seq_num += 1\n PacketSender.spawn_resend_thread(conn, p, destination)\n print(\"not last packet\")\n else:\n p = Packet(packet_type=PacketConstructor.data_type,\n seq_num=seq_num,\n peer_ip_addr=peer_ip,\n peer_port=peer_port,\n is_last_packet=True,\n payload=nbytes(remaining_data))\n\n conn.sendto(p.to_bytes(), destination)\n sent_packets += 1\n remaining_data -= remaining_data\n seq_num += 1\n print(\"remaining data \" + str(remaining_data))\n print(\"is last packet\")\n PacketSender.spawn_resend_thread(conn, p, destination)\n # Update the number of packets still in transit\n while next_seq_num in acked_packets:\n next_seq_num += 1\n sent_packets -= 1\n print(\"Waiting for acks\")\n while not acked_all_packets:\n # Wait here until all packets have been acked\n pass\n print(\"RECEIVED ALL ACKS\")\n PacketSender.was_reset = True\n", "step-ids": [ 3, 5, 9, 10, 11 ] }
[ 3, 5, 9, 10, 11 ]
ghj=input("enter your first name:") print("Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.") print("Q1:-Who is the president of India?") winlist=("ramnath govind","multiple choice question","multiple choice questions","mumbai") enter=input("enter your answer here:") seat=enter.lower() x=0 if seat in winlist: print("woah you surely are smart you are correct!!!!") x=x+1 else: print("you went wrong at the first question") x=x-1 print("Q2:-What is the full form of MCQ?") enter2=input("enter your answer here:") seat2=enter2.lower() if seat2 in winlist: print("you are right!!!!!!") x=x+1 else: print("I told you this is a hard quiz, ur answer is wrong") x=x-1 print("Q3:-which city is the india's largest city by population") enter3=input("enter ur answer here:") seat3=enter3.lower() if seat3 in winlist: print("you are right!!!") x=x+1 else: print("you were wrong you lose 1 mark") x=x-1 print("well " +str(ghj)+ " you have completed the quiz and scored: "+str(x)+" marks")
normal
{ "blob_id": "351421ef6a40e3a4bd4549a1851fbf4bed9ddf30", "index": 5024, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(\n \"\"\"Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\"\"\"\n )\nprint('Q1:-Who is the president of India?')\n<mask token>\nif seat in winlist:\n print('woah you surely are smart you are correct!!!!')\n x = x + 1\nelse:\n print('you went wrong at the first question')\n x = x - 1\nprint('Q2:-What is the full form of MCQ?')\n<mask token>\nif seat2 in winlist:\n print('you are right!!!!!!')\n x = x + 1\nelse:\n print('I told you this is a hard quiz, ur answer is wrong')\n x = x - 1\nprint(\"Q3:-which city is the india's largest city by population\")\n<mask token>\nif seat3 in winlist:\n print('you are right!!!')\n x = x + 1\nelse:\n print('you were wrong you lose 1 mark')\n x = x - 1\nprint('well ' + str(ghj) + ' you have completed the quiz and scored: ' +\n str(x) + ' marks')\n", "step-3": "ghj = input('enter your first name:')\nprint(\n \"\"\"Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\"\"\"\n )\nprint('Q1:-Who is the president of India?')\nwinlist = ('ramnath govind', 'multiple choice question',\n 'multiple choice questions', 'mumbai')\nenter = input('enter your answer here:')\nseat = enter.lower()\nx = 0\nif seat in winlist:\n print('woah you surely are smart you are correct!!!!')\n x = x + 1\nelse:\n print('you went wrong at the first question')\n x = x - 1\nprint('Q2:-What is the full form of MCQ?')\nenter2 = input('enter your answer here:')\nseat2 = enter2.lower()\nif seat2 in winlist:\n print('you are right!!!!!!')\n x = x + 1\nelse:\n print('I told you this is a hard quiz, ur answer is wrong')\n x = x - 1\nprint(\"Q3:-which city is the india's largest city by population\")\nenter3 = input('enter ur answer here:')\nseat3 = enter3.lower()\nif seat3 in winlist:\n print('you are right!!!')\n x = x + 1\nelse:\n print('you were wrong you lose 1 mark')\n x = x - 1\nprint('well ' + str(ghj) + ' you have completed the quiz and scored: ' +\n str(x) + ' marks')\n", "step-4": "ghj=input(\"enter your first name:\")\r\nprint(\"Welcome to my Quiz:\\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\")\r\nprint(\"Q1:-Who is the president of India?\")\r\nwinlist=(\"ramnath govind\",\"multiple choice question\",\"multiple choice questions\",\"mumbai\")\r\nenter=input(\"enter your answer here:\")\r\nseat=enter.lower()\r\nx=0\r\nif seat in winlist:\r\n print(\"woah you surely are smart you are correct!!!!\")\r\n x=x+1\r\nelse:\r\n print(\"you went wrong at the first question\")\r\n x=x-1\r\nprint(\"Q2:-What is the full form of MCQ?\")\r\nenter2=input(\"enter your answer here:\")\r\nseat2=enter2.lower()\r\nif seat2 in winlist:\r\n print(\"you are right!!!!!!\")\r\n x=x+1\r\nelse:\r\n print(\"I told you this is a hard quiz, ur answer is wrong\")\r\n x=x-1\r\nprint(\"Q3:-which city is the india's largest city by population\")\r\nenter3=input(\"enter ur answer here:\")\r\nseat3=enter3.lower()\r\nif seat3 in winlist:\r\n print(\"you are right!!!\")\r\n x=x+1\r\nelse:\r\n print(\"you were wrong you lose 1 mark\")\r\n x=x-1\r\nprint(\"well \" +str(ghj)+ \" you have completed the quiz and scored: \"+str(x)+\" marks\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "step-5": null, 
"step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import hashlib from ast import literal_eval # import requests # from rest_framework import generics from rest_framework.views import APIView from rest_framework.response import Response from django.shortcuts import render, redirect, HttpResponse,get_object_or_404 from django.views.decorators.csrf import csrf_exempt from django.contrib.auth.forms import PasswordChangeForm,AuthenticationForm from django.contrib.auth import update_session_auth_hash,login,logout from django.contrib.auth.decorators import login_required from accounts.forms import RegistrationForm,EditProfileForm,EditUserProfileForm, ResetPasswordForm, SetPasswordForm, SendEmailForm from django.core.mail import send_mail from .models import User,UserProfile from django.http import JsonResponse from html2text import html2text # class UserProfileList(generics.ListCreateAPIView): # queryset = UserProfile.objects.all() # serializer_class = UserProfileSerializer # # def perform_create(self, serializer): # serializer.save() # # class DetailsView(generics.RetrieveUpdateDestroyAPIView): # # queryset = UserProfile.objects.all() # serializer_class = UserProfileSerializer class UserProfileList(APIView): def get(self,request): result = [] for each in User.objects.all(): result.append(each.userprofile.as_json()) return JsonResponse(result,safe=False) def post(self,request): data_dict = literal_eval(request.body) print data_dict try: user = User.objects.create( username=data_dict.get('username'), email = data_dict.get('email'), first_name = data_dict.get('first_name'), last_name = data_dict.get('last_name'), password = data_dict.get('password'), ) except: return JsonResponse({'msg': 'Invalid data'}) try: user.userprofile.phone = data_dict.get('phone') user.userprofile.website = data_dict.get('website') user.userprofile.city = data_dict.get('city') user.userprofile.description = data_dict.get('description') user.userprofile.save() except: return JsonResponse({'msg1': 'User created succesfully','msg2': 'Userprofile created succesfully withe empty data', 'userid': user.id}) return JsonResponse({'msg':'User created succesfully','userid':user.id}) class DetailsView(APIView): def get(self,request,pk): result =[] try: user = User.objects.get(pk=pk) except: return JsonResponse({"msg": "User not found"}) result.append(user.userprofile.as_json()) return JsonResponse(result, safe=False) def put(self,request,pk): try: user = User.objects.get(pk=pk) except: return JsonResponse({"msg": "User not found"}) pass data_dict = literal_eval(request.body) edited = False if 'email' in data_dict.keys(): user.email = data_dict['email'] edited = True if 'first_name' in data_dict.keys(): user.email = data_dict['first_name'] edited = True if 'last_name' in data_dict.keys(): user.email = data_dict['last_name'] edited = True if 'phone' in data_dict.keys(): user.userprofile.phone = data_dict['phone'] edited = True if 'website' in data_dict.keys(): user.userprofile.website = data_dict['website'] edited = True if 'city' in data_dict.keys(): user.userprofile.city = data_dict['city'] edited = True if 'description' in data_dict.keys(): user.userprofile.description = data_dict['description'] edited = True if edited == True: user.save() user.userprofile.save() return JsonResponse({"msg": "User successfully modified"}) return JsonResponse({"msg":"Invalid data"}) def delete(self,request,pk): try: user = User.objects.get(pk=pk) except: return JsonResponse({"msg": "User not found"}) user.delete() return JsonResponse({"msg":"User has been deleted"}) # @csrf_exempt # def 
userprofileapiview(request): # result = [] # # if request.method == 'POST': # data_dict = literal_eval(request.body) # try: # user = User.objects.create( # username=data_dict.get('username'), # email = data_dict.get('email'), # first_name = data_dict.get('first_name'), # last_name = data_dict.get('last_name'), # password = data_dict.get('password'), # ) # except: # return JsonResponse({'msg': 'Invalid data'}) # try: # user.userprofile.phone = data_dict.get('phone') # user.userprofile.website = data_dict.get('website') # user.userprofile.city = data_dict.get('city') # user.userprofile.description = data_dict.get('description') # user.userprofile.save() # except: # return JsonResponse({'msg1': 'User created succesfully','msg2': 'Userprofile created succesfully withe empty data', 'userid': user.id}) # # return JsonResponse({'msg':'User created succesfully','userid':user.id}) # # if request.method == 'GET': # for each in User.objects.all(): # result.append(each.userprofile.as_json()) # # return JsonResponse(result,safe=False) # # @csrf_exempt # def userdetailapiview(request,pk): # result = [] # if request.method == 'GET': # try: # user = User.objects.get(pk=pk) # except: # return JsonResponse({"msg": "User not found"}) # result.append(user.userprofile.as_json()) # return JsonResponse(result, safe=False) # # if request.method == 'DELETE': # try: # user = User.objects.get(pk=pk) # except: # return JsonResponse({"msg": "User not found"}) # user.delete() # return JsonResponse({"msg":"User has been deleted"}) # # if request.method == 'PUT': # try: # user = User.objects.get(pk=pk) # except: # return JsonResponse({"msg": "User not found"}) # pass # data_dict = literal_eval(request.body) # edited = False # if 'email' in data_dict.keys(): # user.email = data_dict['email'] # edited = True # if 'first_name' in data_dict.keys(): # user.email = data_dict['first_name'] # edited = True # if 'last_name' in data_dict.keys(): # user.email = data_dict['last_name'] # edited = True # if 'phone' in data_dict.keys(): # user.userprofile.phone = data_dict['phone'] # edited = True # if 'website' in data_dict.keys(): # user.userprofile.website = data_dict['website'] # edited = True # if 'city' in data_dict.keys(): # user.userprofile.city = data_dict['city'] # edited = True # if 'description' in data_dict.keys(): # user.userprofile.description = data_dict['description'] # edited = True # if edited == True: # user.save() # user.userprofile.save() # return JsonResponse({"msg": "User successfully modified"}) # return JsonResponse({"msg":"Invalid data"}) def loginview(request): if request.POST: form = AuthenticationForm(data=request.POST) if form.is_valid(): user = form.get_user() login(request,user) return redirect('/account/profile') form = AuthenticationForm() args = {"form": form} return render(request, 'accounts/login.html', args) @login_required def logoutview(request): logout(request) return render(request, 'accounts/logout.html') @login_required def view_all(request): user_list = UserProfile.objects.filter(is_live=True) table = {'user_list': user_list} return render(request,'accounts/view_all.html',table) def register(request): if request.method == 'POST': form = RegistrationForm(request.POST) if form.is_valid(): form.save() return redirect('/account/login') form = RegistrationForm() args = {"form":form} return render(request,'accounts/reg_form.html',args) @login_required def view_profile(request): args = {'user':request.user} return render(request,'accounts/profile.html',args) @login_required def 
edit_profile(request): userprofile = UserProfile.objects.get(user=request.user) if request.method=='POST': userform = EditProfileForm(request.POST, instance=request.user) userprofileform = EditUserProfileForm(request.POST, instance=request.user.userprofile) if userform.is_valid() and userprofileform.is_valid(): userform.save() userprofileform.save() return redirect('/account/profile') initial = {'description': userprofile.description, 'city': userprofile.city, 'website': userprofile.website, 'phone': userprofile.phone} userform = EditProfileForm(instance=request.user) userprofileform = EditUserProfileForm(initial=initial) args = { 'userform':userform, 'userprofileform':userprofileform, } return render(request,'accounts/edit_profile.html',args) @login_required def change_password(request): if request.method == 'POST': form = PasswordChangeForm(data=request.POST,user=request.user) if form.is_valid(): form.save() update_session_auth_hash(request,form.user) return redirect('/account/profile') return redirect('account/change_password') form = PasswordChangeForm(user=request.user) args = {'form': form} return render(request, 'accounts/change_password.html', args) @login_required def delete_profile(request): userprofile = UserProfile.objects.get(user=request.user) if request.method == 'POST': userprofile.is_live = False userprofile.save() return redirect('/account/profile/view_all') return render(request,'accounts/delete_profile.html',{'user':userprofile}) def password_reset(request): if request.method == 'POST': form = ResetPasswordForm(request.POST) if form.is_valid(): if form.data['email'] in (User.objects.values_list('email',flat=True)): user = User.objects.get(email=form.data['email']) token = hashlib.md5(str(user.id)).hexdigest() user.userprofile.token = token user.userprofile.save() reset_password_link = 'http://127.0.0.1:8000/account/password_reset/confirm/?token='+str(token)+'&id='+str(user.id) email_body = 'Hi, you can click the following link to reset your password\n\n'+reset_password_link send_mail( 'Reset Password', email_body, '[email protected]', [form.data['email'],], fail_silently=False, ) return redirect('/account/reset_password/done/') return HttpResponse('This email id does not exist') return HttpResponse('Enter a valid email id') form = ResetPasswordForm() args = {'form':form} return render(request,'accounts/password_reset.html',args) def password_reset_confirm(request): token = request.GET.get('token') id = request.GET.get('id') user = User.objects.get(pk=id) if request.method == 'POST': form = SetPasswordForm(request.POST) if form.is_valid(): user.set_password(form.data['password']) user.save() return HttpResponse('You password was reset successfully.<br><br>You can login <a href="http://127.0.0.1:8000/">here</a> ') if user.userprofile.token == token: form = SetPasswordForm() args = {'form':form} return render(request,'accounts/password_reset_confirm.html',args) return HttpResponse('Token expired') def send_email(request): if request.method == "POST": form = SendEmailForm(request.POST) try: for each in User.objects.filter(id__in=form.data.getlist('user')): body = form.data.get('body').replace('{{user}}', each.username) send_mail( subject=form.data.get('subject'), message=html2text(body), from_email='[email protected]', # recipient_list=User.objects.filter(id__in=form.data.getlist('user')).values_list('email', flat=True), recipient_list=[each.email], fail_silently=False, html_message=body, ) return HttpResponse('email sent succesfully') except: return HttpResponse('Invalid 
data or email') form = SendEmailForm args = {'form': form} return render(request,'accounts/send_email.html',args)
normal
{ "blob_id": "a84920821982f04b9835391eb267707971f8f7c1", "index": 3929, "step-1": "import hashlib\nfrom ast import literal_eval\n# import requests\n# from rest_framework import generics\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.shortcuts import render, redirect, HttpResponse,get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.forms import PasswordChangeForm,AuthenticationForm\nfrom django.contrib.auth import update_session_auth_hash,login,logout\nfrom django.contrib.auth.decorators import login_required\nfrom accounts.forms import RegistrationForm,EditProfileForm,EditUserProfileForm, ResetPasswordForm, SetPasswordForm, SendEmailForm\nfrom django.core.mail import send_mail\nfrom .models import User,UserProfile\nfrom django.http import JsonResponse\nfrom html2text import html2text\n\n\n\n# class UserProfileList(generics.ListCreateAPIView):\n# queryset = UserProfile.objects.all()\n# serializer_class = UserProfileSerializer\n#\n# def perform_create(self, serializer):\n# serializer.save()\n#\n# class DetailsView(generics.RetrieveUpdateDestroyAPIView):\n#\n# queryset = UserProfile.objects.all()\n# serializer_class = UserProfileSerializer\n\nclass UserProfileList(APIView):\n\n def get(self,request):\n result = []\n for each in User.objects.all():\n result.append(each.userprofile.as_json())\n return JsonResponse(result,safe=False)\n\n def post(self,request):\n data_dict = literal_eval(request.body)\n print data_dict\n try:\n user = User.objects.create(\n username=data_dict.get('username'),\n email = data_dict.get('email'),\n first_name = data_dict.get('first_name'),\n last_name = data_dict.get('last_name'),\n password = data_dict.get('password'),\n )\n except:\n return JsonResponse({'msg': 'Invalid data'})\n try:\n user.userprofile.phone = data_dict.get('phone')\n user.userprofile.website = data_dict.get('website')\n user.userprofile.city = data_dict.get('city')\n user.userprofile.description = data_dict.get('description')\n user.userprofile.save()\n except:\n return JsonResponse({'msg1': 'User created succesfully','msg2': 'Userprofile created succesfully withe empty data', 'userid': user.id})\n\n return JsonResponse({'msg':'User created succesfully','userid':user.id})\n\n\nclass DetailsView(APIView):\n\n def get(self,request,pk):\n result =[]\n try:\n user = User.objects.get(pk=pk)\n except:\n return JsonResponse({\"msg\": \"User not found\"})\n result.append(user.userprofile.as_json())\n return JsonResponse(result, safe=False)\n\n def put(self,request,pk):\n try:\n user = User.objects.get(pk=pk)\n except:\n return JsonResponse({\"msg\": \"User not found\"})\n pass\n data_dict = literal_eval(request.body)\n edited = False\n if 'email' in data_dict.keys():\n user.email = data_dict['email']\n edited = True\n if 'first_name' in data_dict.keys():\n user.email = data_dict['first_name']\n edited = True\n if 'last_name' in data_dict.keys():\n user.email = data_dict['last_name']\n edited = True\n if 'phone' in data_dict.keys():\n user.userprofile.phone = data_dict['phone']\n edited = True\n if 'website' in data_dict.keys():\n user.userprofile.website = data_dict['website']\n edited = True\n if 'city' in data_dict.keys():\n user.userprofile.city = data_dict['city']\n edited = True\n if 'description' in data_dict.keys():\n user.userprofile.description = data_dict['description']\n edited = True\n if edited == True:\n user.save()\n user.userprofile.save()\n return JsonResponse({\"msg\": \"User 
successfully modified\"})\n return JsonResponse({\"msg\":\"Invalid data\"})\n\n\n def delete(self,request,pk):\n try:\n user = User.objects.get(pk=pk)\n except:\n return JsonResponse({\"msg\": \"User not found\"})\n user.delete()\n return JsonResponse({\"msg\":\"User has been deleted\"})\n\n\n# @csrf_exempt\n# def userprofileapiview(request):\n# result = []\n#\n# if request.method == 'POST':\n# data_dict = literal_eval(request.body)\n# try:\n# user = User.objects.create(\n# username=data_dict.get('username'),\n# email = data_dict.get('email'),\n# first_name = data_dict.get('first_name'),\n# last_name = data_dict.get('last_name'),\n# password = data_dict.get('password'),\n# )\n# except:\n# return JsonResponse({'msg': 'Invalid data'})\n# try:\n# user.userprofile.phone = data_dict.get('phone')\n# user.userprofile.website = data_dict.get('website')\n# user.userprofile.city = data_dict.get('city')\n# user.userprofile.description = data_dict.get('description')\n# user.userprofile.save()\n# except:\n# return JsonResponse({'msg1': 'User created succesfully','msg2': 'Userprofile created succesfully withe empty data', 'userid': user.id})\n#\n# return JsonResponse({'msg':'User created succesfully','userid':user.id})\n#\n# if request.method == 'GET':\n# for each in User.objects.all():\n# result.append(each.userprofile.as_json())\n#\n# return JsonResponse(result,safe=False)\n#\n# @csrf_exempt\n# def userdetailapiview(request,pk):\n# result = []\n# if request.method == 'GET':\n# try:\n# user = User.objects.get(pk=pk)\n# except:\n# return JsonResponse({\"msg\": \"User not found\"})\n# result.append(user.userprofile.as_json())\n# return JsonResponse(result, safe=False)\n#\n# if request.method == 'DELETE':\n# try:\n# user = User.objects.get(pk=pk)\n# except:\n# return JsonResponse({\"msg\": \"User not found\"})\n# user.delete()\n# return JsonResponse({\"msg\":\"User has been deleted\"})\n#\n# if request.method == 'PUT':\n# try:\n# user = User.objects.get(pk=pk)\n# except:\n# return JsonResponse({\"msg\": \"User not found\"})\n# pass\n# data_dict = literal_eval(request.body)\n# edited = False\n# if 'email' in data_dict.keys():\n# user.email = data_dict['email']\n# edited = True\n# if 'first_name' in data_dict.keys():\n# user.email = data_dict['first_name']\n# edited = True\n# if 'last_name' in data_dict.keys():\n# user.email = data_dict['last_name']\n# edited = True\n# if 'phone' in data_dict.keys():\n# user.userprofile.phone = data_dict['phone']\n# edited = True\n# if 'website' in data_dict.keys():\n# user.userprofile.website = data_dict['website']\n# edited = True\n# if 'city' in data_dict.keys():\n# user.userprofile.city = data_dict['city']\n# edited = True\n# if 'description' in data_dict.keys():\n# user.userprofile.description = data_dict['description']\n# edited = True\n# if edited == True:\n# user.save()\n# user.userprofile.save()\n# return JsonResponse({\"msg\": \"User successfully modified\"})\n# return JsonResponse({\"msg\":\"Invalid data\"})\n\n\n\ndef loginview(request):\n if request.POST:\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n user = form.get_user()\n login(request,user)\n return redirect('/account/profile')\n form = AuthenticationForm()\n args = {\"form\": form}\n return render(request, 'accounts/login.html', args)\n\n@login_required\ndef logoutview(request):\n logout(request)\n return render(request, 'accounts/logout.html')\n\n@login_required\ndef view_all(request):\n user_list = UserProfile.objects.filter(is_live=True)\n table = {'user_list': user_list}\n 
return render(request,'accounts/view_all.html',table)\n\n\ndef register(request):\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/account/login')\n form = RegistrationForm()\n args = {\"form\":form}\n return render(request,'accounts/reg_form.html',args)\n\n\n@login_required\ndef view_profile(request):\n args = {'user':request.user}\n return render(request,'accounts/profile.html',args)\n\n\n@login_required\ndef edit_profile(request):\n userprofile = UserProfile.objects.get(user=request.user)\n if request.method=='POST':\n userform = EditProfileForm(request.POST, instance=request.user)\n userprofileform = EditUserProfileForm(request.POST, instance=request.user.userprofile)\n if userform.is_valid() and userprofileform.is_valid():\n userform.save()\n userprofileform.save()\n return redirect('/account/profile')\n initial = {'description': userprofile.description, 'city': userprofile.city, 'website': userprofile.website,\n 'phone': userprofile.phone}\n userform = EditProfileForm(instance=request.user)\n userprofileform = EditUserProfileForm(initial=initial)\n args = {\n 'userform':userform,\n 'userprofileform':userprofileform,\n }\n return render(request,'accounts/edit_profile.html',args)\n\n\n@login_required\ndef change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(data=request.POST,user=request.user)\n if form.is_valid():\n form.save()\n update_session_auth_hash(request,form.user)\n return redirect('/account/profile')\n return redirect('account/change_password')\n form = PasswordChangeForm(user=request.user)\n args = {'form': form}\n return render(request, 'accounts/change_password.html', args)\n\n\n@login_required\ndef delete_profile(request):\n userprofile = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n userprofile.is_live = False\n userprofile.save()\n return redirect('/account/profile/view_all')\n return render(request,'accounts/delete_profile.html',{'user':userprofile})\n\n\ndef password_reset(request):\n if request.method == 'POST':\n form = ResetPasswordForm(request.POST)\n if form.is_valid():\n if form.data['email'] in (User.objects.values_list('email',flat=True)):\n user = User.objects.get(email=form.data['email'])\n token = hashlib.md5(str(user.id)).hexdigest()\n user.userprofile.token = token\n user.userprofile.save()\n reset_password_link = 'http://127.0.0.1:8000/account/password_reset/confirm/?token='+str(token)+'&id='+str(user.id)\n email_body = 'Hi, you can click the following link to reset your password\\n\\n'+reset_password_link\n send_mail(\n 'Reset Password',\n email_body,\n '[email protected]',\n [form.data['email'],],\n fail_silently=False,\n )\n return redirect('/account/reset_password/done/')\n return HttpResponse('This email id does not exist')\n return HttpResponse('Enter a valid email id')\n form = ResetPasswordForm()\n args = {'form':form}\n return render(request,'accounts/password_reset.html',args)\n\n\ndef password_reset_confirm(request):\n token = request.GET.get('token')\n id = request.GET.get('id')\n user = User.objects.get(pk=id)\n if request.method == 'POST':\n form = SetPasswordForm(request.POST)\n if form.is_valid():\n user.set_password(form.data['password'])\n user.save()\n return HttpResponse('You password was reset successfully.<br><br>You can login <a href=\"http://127.0.0.1:8000/\">here</a> ')\n if user.userprofile.token == token:\n form = SetPasswordForm()\n args = {'form':form}\n return 
render(request,'accounts/password_reset_confirm.html',args)\n return HttpResponse('Token expired')\n\ndef send_email(request):\n if request.method == \"POST\":\n form = SendEmailForm(request.POST)\n try:\n for each in User.objects.filter(id__in=form.data.getlist('user')):\n body = form.data.get('body').replace('{{user}}', each.username)\n send_mail(\n subject=form.data.get('subject'),\n message=html2text(body),\n from_email='[email protected]',\n # recipient_list=User.objects.filter(id__in=form.data.getlist('user')).values_list('email', flat=True),\n recipient_list=[each.email],\n fail_silently=False,\n html_message=body,\n )\n return HttpResponse('email sent succesfully')\n except:\n return HttpResponse('Invalid data or email')\n form = SendEmailForm\n args = {'form': form}\n return render(request,'accounts/send_email.html',args)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import pathlib
import shutil
import os
import glob
import pandas as pd
import sqlalchemy as sqla

"""
SCRIPT TO FILL THE DATABASE FROM CSV ON MEGA IF LOSE DATA IN PARTICULAR DATE
"""

PATH = "/home/thomas/Documents/TER/AJOUTER_CSV_BDD/"
folder = "test/"
files_used = []
totalFiles = 0
contents = pathlib.Path(PATH + folder).iterdir()
for path in sorted(contents):  # use .stem -> file name without extension / .name -> full file name
    files_used.append(path.name)
    totalFiles += 1

print(files_used)
print(totalFiles)

li = []
for filename in files_used:
    df = pd.read_csv(PATH + folder + filename, sep=';', skiprows=range(1, 6), index_col=0)
    li.append(df)

frame = pd.concat(li)
frame.to_csv("merged.csv", sep=';')
print('FINISH MERGING FILES!')

# Move all files used into the destination folder
folder_dest = 'dest'
for file in files_used:
    shutil.move(PATH + folder + file, PATH + folder_dest)
print('FINISH MOVING MERGED FILES!')

df = pd.read_csv('merged.csv', sep=';')
df['Date'] = df['Date'].str[0:10] + ' ' + df['Date'].str[11:19]
df = df.rename(columns={'Date': 'horodatage', 'Nom parking': 'nom', 'Type de parc': 'type_parking',
                        "Horaires d'accès au public (pour les usagers non abonnés)": 'horaires',
                        'Code parking': 'code_parking', 'Type de compteur': 'type_compteur',
                        'Places disponibles': 'places_disponibles'})
df['horodatage'] = pd.to_datetime(df['horodatage'])
df = df.loc[:, ['code_parking', 'type_compteur', 'horodatage', 'places_disponibles']]
print('FINISH CLEAN DF!')
print(df)
df.info()

host = ''
port = ''
db = ''
user = ''
psw = ''
name_table = ''

# dialect+driver://username:password@host:port/database
engine = sqla.create_engine('mysql://' + user + ':' + psw + '@' + host + ':' + port + '/' + db)
print('CONNECTED!')

"""
df.to_sql(name_table, engine, if_exists='append', index=False, chunksize=1024,
          dtype={'id': sqla.Integer, 'code_parking': sqla.String(255), 'type_compteur': sqla.String(255),
                 'horodatage': sqla.DateTime, 'places_disponibles': sqla.Integer})
print('Finished export to Database!')
"""
normal
{ "blob_id": "795936dad7a9e51edf0df66207a43ac4d97e9023", "index": 3781, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor path in sorted(contents):\n files_used.append(path.name)\n totalFiles += 1\nprint(files_used)\nprint(totalFiles)\n<mask token>\nfor filename in files_used:\n df = pd.read_csv(PATH + folder + filename, sep=';', skiprows=range(1, 6\n ), index_col=0)\n li.append(df)\n<mask token>\nframe.to_csv('merged.csv', sep=';')\nprint('FINISH MERGING FILES!')\n<mask token>\nfor file in files_used:\n shutil.move(PATH + folder + file, PATH + folder_dest)\nprint('FINISH MOVING MERGED FILES!')\n<mask token>\nprint('FINISH CLEAN DF!')\nprint(df)\ndf.info()\n<mask token>\nprint('CONNECTED!')\n<mask token>\n", "step-3": "<mask token>\nPATH = '/home/thomas/Documents/TER/AJOUTER_CSV_BDD/'\nfolder = 'test/'\nfiles_used = []\ntotalFiles = 0\ncontents = pathlib.Path(PATH + folder).iterdir()\nfor path in sorted(contents):\n files_used.append(path.name)\n totalFiles += 1\nprint(files_used)\nprint(totalFiles)\nli = []\nfor filename in files_used:\n df = pd.read_csv(PATH + folder + filename, sep=';', skiprows=range(1, 6\n ), index_col=0)\n li.append(df)\nframe = pd.concat(li)\nframe.to_csv('merged.csv', sep=';')\nprint('FINISH MERGING FILES!')\nfolder_dest = 'dest'\nfor file in files_used:\n shutil.move(PATH + folder + file, PATH + folder_dest)\nprint('FINISH MOVING MERGED FILES!')\ndf = pd.read_csv('merged.csv', sep=';')\ndf['Date'] = df['Date'].str[0:10] + ' ' + df['Date'].str[11:19]\ndf = df.rename(columns={'Date': 'horodatage', 'Nom parking': 'nom',\n 'Type de parc': 'type_parking',\n \"Horaires d'accès au public (pour les usagers non abonnés)\": 'horaires',\n 'Code parking': 'code_parking', 'Type de compteur': 'type_compteur',\n 'Places disponibles': 'places_disponibles'})\ndf['horodatage'] = pd.to_datetime(df['horodatage'])\ndf = df.loc[:, ['code_parking', 'type_compteur', 'horodatage',\n 'places_disponibles']]\nprint('FINISH CLEAN DF!')\nprint(df)\ndf.info()\nhost = ''\nport = ''\ndb = ''\nuser = ''\npsw = ''\nname_table = ''\nengine = sqla.create_engine('mysql://' + user + ':' + psw + '@' + host +\n ':' + port + '/' + db)\nprint('CONNECTED!')\n<mask token>\n", "step-4": "import pathlib\nimport shutil\nimport os\nimport glob\nimport pandas as pd\nimport sqlalchemy as sqla\n<mask token>\nPATH = '/home/thomas/Documents/TER/AJOUTER_CSV_BDD/'\nfolder = 'test/'\nfiles_used = []\ntotalFiles = 0\ncontents = pathlib.Path(PATH + folder).iterdir()\nfor path in sorted(contents):\n files_used.append(path.name)\n totalFiles += 1\nprint(files_used)\nprint(totalFiles)\nli = []\nfor filename in files_used:\n df = pd.read_csv(PATH + folder + filename, sep=';', skiprows=range(1, 6\n ), index_col=0)\n li.append(df)\nframe = pd.concat(li)\nframe.to_csv('merged.csv', sep=';')\nprint('FINISH MERGING FILES!')\nfolder_dest = 'dest'\nfor file in files_used:\n shutil.move(PATH + folder + file, PATH + folder_dest)\nprint('FINISH MOVING MERGED FILES!')\ndf = pd.read_csv('merged.csv', sep=';')\ndf['Date'] = df['Date'].str[0:10] + ' ' + df['Date'].str[11:19]\ndf = df.rename(columns={'Date': 'horodatage', 'Nom parking': 'nom',\n 'Type de parc': 'type_parking',\n \"Horaires d'accès au public (pour les usagers non abonnés)\": 'horaires',\n 'Code parking': 'code_parking', 'Type de compteur': 'type_compteur',\n 'Places disponibles': 'places_disponibles'})\ndf['horodatage'] = pd.to_datetime(df['horodatage'])\ndf = df.loc[:, ['code_parking', 'type_compteur', 'horodatage',\n 'places_disponibles']]\nprint('FINISH CLEAN 
DF!')\nprint(df)\ndf.info()\nhost = ''\nport = ''\ndb = ''\nuser = ''\npsw = ''\nname_table = ''\nengine = sqla.create_engine('mysql://' + user + ':' + psw + '@' + host +\n ':' + port + '/' + db)\nprint('CONNECTED!')\n<mask token>\n", "step-5": "import pathlib\nimport shutil\nimport os\nimport glob\nimport pandas as pd\nimport sqlalchemy as sqla\n\n\"\"\"\nSCRIPT TO FILL THE DATABASE FROM CSV ON MEGA IF LOSE DATA IN PARTICULAR DATE\n\"\"\"\n\nPATH = \"/home/thomas/Documents/TER/AJOUTER_CSV_BDD/\"\nfolder = \"test/\"\nfiles_used = []\ntotalFiles = 0\ncontents = pathlib.Path(PATH+folder).iterdir()\nfor path in sorted(contents): # utiliser .stem -> nom sans extension fichier / .name -> nom fichier complet\n files_used.append(path.name)\n totalFiles+=1\n\nprint(files_used)\nprint(totalFiles)\n\nli = []\n\nfor filename in files_used:\n\tdf = pd.read_csv(PATH+folder+filename,sep=';',skiprows=range(1,6),index_col=0)\n\tli.append(df)\n\nframe = pd.concat(li)\n\n\nframe.to_csv(\"merged.csv\",sep=';')\nprint('FINISH MERGING FILES!')\n\n#Move all files used in folder dest\nfolder_dest = 'dest'\nfor file in files_used:\n shutil.move(PATH+folder+file, PATH+folder_dest)\nprint('FINISH MOVING MERGED FILES!')\n\n\ndf = pd.read_csv('merged.csv',sep=';') \n\n\ndf['Date'] = df['Date'].str[0:10] +' '+df['Date'].str[11:19]\ndf = df.rename(columns={'Date': 'horodatage','Nom parking': 'nom','Type de parc': 'type_parking',\"Horaires d'accès au public (pour les usagers non abonnés)\": 'horaires','Code parking': 'code_parking','Type de compteur': 'type_compteur', 'Places disponibles': 'places_disponibles'})\ndf['horodatage'] = pd.to_datetime(df['horodatage'])\ndf = df.loc[: ,['code_parking','type_compteur','horodatage','places_disponibles']]\nprint('FINISH CLEAN DF!')\nprint(df)\ndf.info()\n\nhost = ''\nport = ''\ndb = ''\nuser = ''\npsw = ''\nname_table = ''\n\n\n# dialect+driver://username:password@host:port/database\nengine = sqla.create_engine('mysql://'+user+':'+psw+'@'+host+':'+port+'/'+db)\nprint('CONNECTED!')\n\n\"\"\"\n\ndf.to_sql(name_table,engine,if_exists='append',index=False,chunksize=1024,dtype={'id': sqla.Integer,'code_parking': sqla.String(255),'type_compteur': sqla.String(255),'horodatage': sqla.DateTime,'places_disponibles': sqla.Integer})\nprint('Finished export to Database!')\n\"\"\"\n\n\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from flask import Flask, render_template

app = Flask(__name__)

@app.route("/")
def index():
    headline = "Hello world from a variable!"
    # the "headline" on the left is the variable name used in the view (template)
    # the "headline" on the right is the variable name on the server side
    return render_template("index.html", headline=headline)

# Now we reuse the same index.html but with different content!
@app.route("/bye/")
def bye():
    headline = "Goodbye!"
    return render_template("index.html", headline=headline)
normal
{ "blob_id": "83bbb6433d1577be869bf840bdd42aa86e415da6", "index": 9328, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n headline = 'Hello world from a variable!'\n return render_template('index.html', headline=headline)\n\n\[email protected]('/bye/')\ndef bye():\n headline = 'Goodbye!'\n return render_template('index.html', headline=headline)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n headline = 'Hello world from a variable!'\n return render_template('index.html', headline=headline)\n\n\[email protected]('/bye/')\ndef bye():\n headline = 'Goodbye!'\n return render_template('index.html', headline=headline)\n", "step-4": "from flask import Flask, render_template\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n headline = 'Hello world from a variable!'\n return render_template('index.html', headline=headline)\n\n\[email protected]('/bye/')\ndef bye():\n headline = 'Goodbye!'\n return render_template('index.html', headline=headline)\n", "step-5": "from flask import Flask, render_template\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef index():\n headline = \"Hello world from a variable!\"\n # headline de la izq es el nombre de la variable en la vista\n # headline de la der es el nombre de la variable en el server\n return render_template(\"index.html\", headline=headline)\n\n# Ahora usamos el mismo idex.html pero con un contenido distinto!\[email protected](\"/bye/\")\ndef bye():\n headline = \"Goodbye!\"\n return render_template(\"index.html\", headline=headline)", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
import json
import sys
import os

# Change to Singularity working directory.
os.chdir('/mnt/cwd')

# Take subset index as argument
subset_index = sys.argv[1]

# Open up subset matching this.
with open('/mnt/scripts/outputs/instcat_list_subset'+str(subset_index)+'.json', 'r') as f:
    instcat_list_subset = json.load(f)

# Import instcat trimmer
sys.path.append('/mnt/scripts')
import instcat_trimmer as ict

ict.determine_instcat_work(instcat_list_subset, '/mnt/scripts/outputs/worklist_subset'+str(subset_index)+'.json')
normal
{ "blob_id": "e2e5ca388d67f2a13eaef6067fc19e2dfe284a55", "index": 4469, "step-1": "<mask token>\n", "step-2": "<mask token>\nos.chdir('/mnt/cwd')\n<mask token>\nwith open('/mnt/scripts/outputs/instcat_list_subset' + str(subset_index) +\n '.json', 'r') as f:\n instcat_list_subset = json.load(f)\nsys.path.append('/mnt/scripts')\n<mask token>\nict.determine_instcat_work(instcat_list_subset, \n '/mnt/scripts/outputs/worklist_subset' + str(subset_index) + '.json')\n", "step-3": "<mask token>\nos.chdir('/mnt/cwd')\nsubset_index = sys.argv[1]\nwith open('/mnt/scripts/outputs/instcat_list_subset' + str(subset_index) +\n '.json', 'r') as f:\n instcat_list_subset = json.load(f)\nsys.path.append('/mnt/scripts')\n<mask token>\nict.determine_instcat_work(instcat_list_subset, \n '/mnt/scripts/outputs/worklist_subset' + str(subset_index) + '.json')\n", "step-4": "import json\nimport sys\nimport os\nos.chdir('/mnt/cwd')\nsubset_index = sys.argv[1]\nwith open('/mnt/scripts/outputs/instcat_list_subset' + str(subset_index) +\n '.json', 'r') as f:\n instcat_list_subset = json.load(f)\nsys.path.append('/mnt/scripts')\nimport instcat_trimmer as ict\nict.determine_instcat_work(instcat_list_subset, \n '/mnt/scripts/outputs/worklist_subset' + str(subset_index) + '.json')\n", "step-5": "import json\nimport sys\nimport os\n\n# Change to Singularity working directory.\nos.chdir('/mnt/cwd')\n\n# Take subset index as argument\nsubset_index = sys.argv[1]\n\n# Open up subset matching this.\nwith open('/mnt/scripts/outputs/instcat_list_subset'+str(subset_index)+'.json', 'r') as f:\n instcat_list_subset = json.load(f)\n\n# Import instcat trimmer\nsys.path.append('/mnt/scripts')\nimport instcat_trimmer as ict\n\nict.determine_instcat_work(instcat_list_subset, '/mnt/scripts/outputs/worklist_subset'+str(subset_index)+'.json')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os
import imageio
import h5py
import numpy as np


def create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks):
    with h5py.File(data_path, 'a') as f:
        f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)
        f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape), chunks=chunks)


def create_image_collection_test_data(folder, n_images, min_shape, max_shape):
    im_folder = os.path.join(folder, 'images')
    label_folder = os.path.join(folder, 'labels')
    os.makedirs(im_folder, exist_ok=True)
    os.makedirs(label_folder, exist_ok=True)

    for i in range(n_images):
        shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(min_shape, max_shape))
        raw = np.random.rand(*shape).astype('int16')
        label = np.random.randint(0, 4, size=shape)
        imageio.imwrite(os.path.join(im_folder, f"im_{i}.tif"), raw)
        imageio.imwrite(os.path.join(label_folder, f"im_{i}.tif"), label)
normal
{ "blob_id": "e3417980599448f1293b56cb95312088e7a8abe3", "index": 9713, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks\n ):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape\n ), chunks=chunks)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks\n ):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape\n ), chunks=chunks)\n\n\ndef create_image_collection_test_data(folder, n_images, min_shape, max_shape):\n im_folder = os.path.join(folder, 'images')\n label_folder = os.path.join(folder, 'labels')\n os.makedirs(im_folder, exist_ok=True)\n os.makedirs(label_folder, exist_ok=True)\n for i in range(n_images):\n shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(\n min_shape, max_shape))\n raw = np.random.rand(*shape).astype('int16')\n label = np.random.randint(0, 4, size=shape)\n imageio.imwrite(os.path.join(im_folder, f'im_{i}.tif'), raw)\n imageio.imwrite(os.path.join(label_folder, f'im_{i}.tif'), label)\n", "step-4": "import os\nimport imageio\nimport h5py\nimport numpy as np\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks\n ):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape\n ), chunks=chunks)\n\n\ndef create_image_collection_test_data(folder, n_images, min_shape, max_shape):\n im_folder = os.path.join(folder, 'images')\n label_folder = os.path.join(folder, 'labels')\n os.makedirs(im_folder, exist_ok=True)\n os.makedirs(label_folder, exist_ok=True)\n for i in range(n_images):\n shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(\n min_shape, max_shape))\n raw = np.random.rand(*shape).astype('int16')\n label = np.random.randint(0, 4, size=shape)\n imageio.imwrite(os.path.join(im_folder, f'im_{i}.tif'), raw)\n imageio.imwrite(os.path.join(label_folder, f'im_{i}.tif'), label)\n", "step-5": "import os\nimport imageio\nimport h5py\nimport numpy as np\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape), chunks=chunks)\n\n\ndef create_image_collection_test_data(folder, n_images, min_shape, max_shape):\n im_folder = os.path.join(folder, 'images')\n label_folder = os.path.join(folder, 'labels')\n os.makedirs(im_folder, exist_ok=True)\n os.makedirs(label_folder, exist_ok=True)\n\n for i in range(n_images):\n shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(min_shape, max_shape))\n raw = np.random.rand(*shape).astype('int16')\n label = np.random.randint(0, 4, size=shape)\n imageio.imwrite(os.path.join(im_folder, f\"im_{i}.tif\"), raw)\n imageio.imwrite(os.path.join(label_folder, f\"im_{i}.tif\"), label)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Reference: https://docs.python.org/2/library/unittest.html
import unittest
import sys
sys.path.append('..')
from database_utils import DatabaseUtils

class Test_DatabaseUtils(unittest.TestCase):
    def setUp(self):
        self.db = DatabaseUtils()

    def dataCount(self):
        with self.db.connection.cursor() as cursor:
            cursor.execute("select count(*) from LmsUser")
            return cursor.fetchone()[0]

    def test_getUser(self):
        count = self.dataCount()
        try:
            trueResult = self.db.getUser("username")
            print("Test passed")
        except:
            print("Test failed")

    def test_insertBookTransaction(self):
        testData = (1, 1, "2019-01-01", "abc")
        result = self.db.insertBookTransaction(testData[0], testData[1], testData[2], testData[3])
        print("result: ", result)
        self.assertTrue(result)

    def test_updateBookStatus(self):
        testData = (1, "anything")
        result = self.db.updateBookStatus(testData[1], testData[0])
        self.assertFalse(result)

    def test_updateBookTransaction(self):
        testData = (1, "anything", "2019-01-01")
        result = self.db.updateBookTransaction(testData[0], testData[1], testData[2])
        self.assertFalse(result)

    def test_searchBooks(self):
        result = self.db.searchBooks("abc")
        self.assertFalse(result)
        result = self.db.searchBooks("Harry")
        self.assertTrue(result)

    def test_searchBooksAuthur(self):
        result = self.db.searchBooksAuthur("abc")
        self.assertFalse(result)
        result = self.db.searchBooksAuthur("gavin")
        self.assertTrue(result)

    def test_searchBooksISBN(self):
        result = self.db.searchBooksISBN(1)
        self.assertFalse(result)

    def test_listBooks(self):
        result = self.db.listBooks()
        self.assertTrue(result)

    def test_getBook(self):
        result = self.db.getBook(1)
        self.assertTrue(result)

    def test_getBookISBN(self):
        result = self.db.getBookISBN(1)
        self.assertFalse(result)

    def test_listReturnBooks(self):
        result = self.db.listReturnBooks(1)
        self.assertTrue(result)

    def test_getReturnBook(self):
        result = self.db.getReturnBook(1, 1)
        self.assertTrue(result)

if __name__ == "__main__":
    unittest.main()
normal
{ "blob_id": "ff8e8af72a8eb97a392fcfec5960eed7a2e51f68", "index": 9211, "step-1": "<mask token>\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n <mask token>\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n <mask token>\n <mask token>\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n <mask token>\n\n def test_searchBooksAuthur(self):\n result = self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n <mask token>\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n <mask token>\n <mask token>\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n <mask token>\n\n def test_searchBooksAuthur(self):\n result = self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result = self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n <mask token>\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n\n def test_insertBookTransaction(self):\n testData = 1, 1, '2019-01-01', 'abc'\n result = self.db.insertBookTransaction(testData[0], testData[1],\n testData[2], testData[3])\n print('result: ', result)\n self.assertTrue(result)\n\n def test_updateBookStatus(self):\n testData = 1, 'anything'\n result = self.db.updateBookStatus(testData[1], testData[0])\n self.assertFalse(result)\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n\n def test_searchBooks(self):\n result = self.db.searchBooks('abc')\n self.assertFalse(result)\n result = self.db.searchBooks('Harry')\n self.assertTrue(result)\n\n def test_searchBooksAuthur(self):\n result = 
self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result = self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n <mask token>\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\n<mask token>\n", "step-4": "import unittest\nimport sys\nsys.path.append('..')\nfrom database_utils import DatabaseUtils\n\n\nclass Test_DatabaseUtils(unittest.TestCase):\n\n def setUp(self):\n self.db = DatabaseUtils()\n\n def dataCount(self):\n with self.db.connection.cursor() as cursor:\n cursor.execute('select count(*) from LmsUser')\n return cursor.fetchone()[0]\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult = self.db.getUser('username')\n print('Test passed')\n except:\n print('Test failed')\n\n def test_insertBookTransaction(self):\n testData = 1, 1, '2019-01-01', 'abc'\n result = self.db.insertBookTransaction(testData[0], testData[1],\n testData[2], testData[3])\n print('result: ', result)\n self.assertTrue(result)\n\n def test_updateBookStatus(self):\n testData = 1, 'anything'\n result = self.db.updateBookStatus(testData[1], testData[0])\n self.assertFalse(result)\n\n def test_updateBookTransaction(self):\n testData = 1, 'anything', '2019-01-01'\n result = self.db.updateBookTransaction(testData[0], testData[1],\n testData[2])\n self.assertFalse(result)\n\n def test_searchBooks(self):\n result = self.db.searchBooks('abc')\n self.assertFalse(result)\n result = self.db.searchBooks('Harry')\n self.assertTrue(result)\n\n def test_searchBooksAuthur(self):\n result = self.db.searchBooksAuthur('abc')\n self.assertFalse(result)\n result = self.db.searchBooksAuthur('gavin')\n self.assertTrue(result)\n\n def test_searchBooksISBN(self):\n result = self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result = self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result = self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result = self.db.getBookISBN(1)\n self.assertFalse(result)\n\n def test_listReturnBooks(self):\n result = self.db.listReturnBooks(1)\n self.assertTrue(result)\n\n def test_getReturnBook(self):\n result = self.db.getReturnBook(1, 1)\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "# Reference: https://docs.python.org/2/library/unittest.html\nimport unittest\nimport sys\nsys.path.append('..')\nfrom database_utils import DatabaseUtils\n\nclass Test_DatabaseUtils(unittest.TestCase):\n def setUp(self):\n self.db=DatabaseUtils()\n \n def dataCount(self):\n with self.db.connection.cursor() as cursor:\n cursor.execute(\"select count(*) from LmsUser\")\n return cursor.fetchone()[0]\n\n def test_getUser(self):\n count = self.dataCount()\n try:\n trueResult=self.db.getUser(\"username\")\n print(\"Test passed\")\n except:\n print(\"Test failed\")\n\n def test_insertBookTransaction(self):\n testData=(1,1,\"2019-01-01\",\"abc\")\n result=self.db.insertBookTransaction(testData[0],testData[1],testData[2],testData[3])\n print(\"result: \",result)\n self.assertTrue(result)\n\n def test_updateBookStatus(self):\n testData=(1,\"anything\")\n 
result=self.db.updateBookStatus(testData[1],testData[0])\n self.assertFalse(result)\n\n def test_updateBookTransaction(self):\n testData=(1,\"anything\",\"2019-01-01\")\n result=self.db.updateBookTransaction(testData[0],testData[1],testData[2])\n self.assertFalse(result)\n \n def test_searchBooks(self):\n result=self.db.searchBooks(\"abc\")\n self.assertFalse(result)\n result=self.db.searchBooks(\"Harry\")\n self.assertTrue(result)\n \n def test_searchBooksAuthur(self):\n result=self.db.searchBooksAuthur(\"abc\")\n self.assertFalse(result)\n result=self.db.searchBooksAuthur(\"gavin\")\n self.assertTrue(result)\n \n def test_searchBooksISBN(self):\n result=self.db.searchBooksISBN(1)\n self.assertFalse(result)\n\n def test_listBooks(self):\n result=self.db.listBooks()\n self.assertTrue(result)\n\n def test_getBook(self):\n result=self.db.getBook(1)\n self.assertTrue(result)\n\n def test_getBookISBN(self):\n result=self.db.getBookISBN(1)\n self.assertFalse(result)\n\n def test_listReturnBooks(self):\n result=self.db.listReturnBooks(1)\n self.assertTrue(result)\n\n def test_getReturnBook(self):\n result=self.db.getReturnBook(1,1)\n self.assertTrue(result)\n\nif __name__ == \"__main__\":\n unittest.main()", "step-ids": [ 9, 10, 13, 17, 18 ] }
[ 9, 10, 13, 17, 18 ]
from django.apps import AppConfig


class StonewallConfig(AppConfig):
    name = 'stonewall'
normal
{ "blob_id": "8364264851895ccabeb74fd3fab1d4f39da717f8", "index": 8398, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass StonewallConfig(AppConfig):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass StonewallConfig(AppConfig):\n name = 'stonewall'\n", "step-4": "from django.apps import AppConfig\n\n\nclass StonewallConfig(AppConfig):\n name = 'stonewall'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Generated by Django 3.1.6 on 2021-07-17 10:35

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('shop', '0032_product_sex'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='price_ret_sale',
            field=models.IntegerField(default=0, verbose_name='Розничная цена, с учетом скидки'),
        ),
        migrations.AddField(
            model_name='product',
            name='size_5xl',
            field=models.IntegerField(default=0, verbose_name='5XL размер'),
        ),
        migrations.AddField(
            model_name='product',
            name='size_6xl',
            field=models.IntegerField(default=0, verbose_name='6XL размер'),
        ),
        migrations.AlterField(
            model_name='product',
            name='price_opt_2',
            field=models.IntegerField(default=0, verbose_name='- 3% от 30000'),
        ),
        migrations.AlterField(
            model_name='product',
            name='price_opt_3',
            field=models.IntegerField(default=0, verbose_name='- 7% от 70000'),
        ),
        migrations.AlterField(
            model_name='product',
            name='price_opt_4',
            field=models.IntegerField(default=0, verbose_name='- 11% от 110000'),
        ),
        migrations.AlterField(
            model_name='product',
            name='sex',
            field=models.CharField(choices=[('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), ('Унисекс', 'Unisex')], default='Мужское', max_length=10),
        ),
    ]
normal
{ "blob_id": "09660cfcff7d5da0339da201cb18b6f63bec2df9", "index": 1394, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0032_product_sex')]\n operations = [migrations.AddField(model_name='product', name=\n 'price_ret_sale', field=models.IntegerField(default=0, verbose_name\n ='Розничная цена, с учетом скидки')), migrations.AddField(\n model_name='product', name='size_5xl', field=models.IntegerField(\n default=0, verbose_name='5XL размер')), migrations.AddField(\n model_name='product', name='size_6xl', field=models.IntegerField(\n default=0, verbose_name='6XL размер')), migrations.AlterField(\n model_name='product', name='price_opt_2', field=models.IntegerField\n (default=0, verbose_name='- 3% от 30000')), migrations.AlterField(\n model_name='product', name='price_opt_3', field=models.IntegerField\n (default=0, verbose_name='- 7% от 70000')), migrations.AlterField(\n model_name='product', name='price_opt_4', field=models.IntegerField\n (default=0, verbose_name='- 11% от 110000')), migrations.AlterField\n (model_name='product', name='sex', field=models.CharField(choices=[\n ('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (\n 'Унисекс', 'Unisex')], default='Мужское', max_length=10))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0032_product_sex')]\n operations = [migrations.AddField(model_name='product', name=\n 'price_ret_sale', field=models.IntegerField(default=0, verbose_name\n ='Розничная цена, с учетом скидки')), migrations.AddField(\n model_name='product', name='size_5xl', field=models.IntegerField(\n default=0, verbose_name='5XL размер')), migrations.AddField(\n model_name='product', name='size_6xl', field=models.IntegerField(\n default=0, verbose_name='6XL размер')), migrations.AlterField(\n model_name='product', name='price_opt_2', field=models.IntegerField\n (default=0, verbose_name='- 3% от 30000')), migrations.AlterField(\n model_name='product', name='price_opt_3', field=models.IntegerField\n (default=0, verbose_name='- 7% от 70000')), migrations.AlterField(\n model_name='product', name='price_opt_4', field=models.IntegerField\n (default=0, verbose_name='- 11% от 110000')), migrations.AlterField\n (model_name='product', name='sex', field=models.CharField(choices=[\n ('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (\n 'Унисекс', 'Unisex')], default='Мужское', max_length=10))]\n", "step-5": "# Generated by Django 3.1.6 on 2021-07-17 10:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0032_product_sex'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='price_ret_sale',\n field=models.IntegerField(default=0, verbose_name='Розничная цена, с учетом скидки'),\n ),\n migrations.AddField(\n model_name='product',\n name='size_5xl',\n field=models.IntegerField(default=0, verbose_name='5XL размер'),\n ),\n migrations.AddField(\n model_name='product',\n name='size_6xl',\n field=models.IntegerField(default=0, verbose_name='6XL размер'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_2',\n field=models.IntegerField(default=0, verbose_name='- 3% от 30000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_3',\n field=models.IntegerField(default=0, 
verbose_name='- 7% от 70000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_4',\n field=models.IntegerField(default=0, verbose_name='- 11% от 110000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='sex',\n field=models.CharField(choices=[('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), ('Унисекс', 'Unisex')], default='Мужское', max_length=10),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import json
import joblib
import numpy as np
import datetime
import sqlalchemy as sa
import cx_Oracle
import pandas as pd

from flask import Flask, render_template, session, request, redirect, url_for

app = Flask(__name__)
oracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')

@app.route("/")
def index():
    return render_template('index.html')

@app.route("/survey", methods=['POST', 'GET'])
def survey():
    if request.method == 'GET':
        return render_template('survey.html')

    if request.method == 'POST':
        is_oversea = request.form['oversea']

        gender = request.form['gender']
        age = request.form['age']
        income = request.form['income']
        knowledge = request.form['knowledge']
        exp = request.form['exp']
        risk = request.form['risk']
        term = request.form['term']
        s1 = request.form['s1']
        s2 = request.form['s2']
        s3 = request.form['s3']
        s4 = request.form['s4']
        s5 = request.form['s5']

        i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2, s3, s4, s5]
        i_list = list(map(int, i_list))  # str -> int
        score = sum(i_list)
        i_list.append(score)
        data = np.array(i_list).reshape(1, -1)

        clf = joblib.load('./models/rf_model.pkl')
        type_num = clf.predict(data)

        if type_num == 0:
            invest_type = "안정추구형"
        elif type_num == 1:
            invest_type = "안정형"
        elif type_num == 2:
            invest_type = "적극투자형"
        elif type_num == 3:
            invest_type = "공격투자형"
        else:
            invest_type = "위험중립형"

        return render_template('result.html', KEY_INVEST_TYPE=invest_type, IS_OVERSEA=is_oversea)

@app.route("/portfolio", methods=['POST', 'GET'])
def portfolio():
    if request.method == 'POST':
        # domestic (KR) portfolios
        portfolio0 = ['195930', '133690', '273130', '284430', '183700']  # stable
        portfolio1 = ['195930', '133690', '239660', '284430', '183700']  # stability-seeking
        portfolio2 = ['195930', '133690', '239660', '278620', '284430']  # risk-neutral
        portfolio3 = ['195930', '278530', '133690', '239660', '284430']  # active
        portfolio4 = ['195930', '278530', '277630', '133690', '284430']  # aggressive

        # US portfolios
        portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']   # stable
        portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']  # stability-seeking
        portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']  # risk-neutral
        portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']  # active
        portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']  # aggressive

        price = request.form['price']
        invest_type = request.form['type']
        risk_no = request.form['risk_no']
        is_oversea = request.form['oversea']

        db = ""

        if is_oversea == '0':  # overseas ETF
            db = "ETF_US"
        else:                  # domestic ETF
            db = "ETF_KR"

        print(db)

        with oracle_engine.connect() as conn:
            try:
                sql = "select * from " + db + " where invest_type=:1"
                results = conn.execute(sql, (invest_type)).fetchall()

                name_list = []    # product names
                risk_list = []    # risk grades
                weight_list = []  # weights
                returns_1y = []   # 1-year returns
                returns_3y = []   # 3-year returns
                returns_5y = []   # 5-year returns

                for etf in results:
                    name_list.append(etf[0])
                    risk_list.append(etf[2])
                    weight_list.append(etf[3])
                    returns_1y.append(etf[4])
                    returns_3y.append(etf[5])
                    returns_5y.append(etf[6])

                # fetch historical return data for the products of this investor type
                sql = "select * from RETURN"
                return_df = pd.read_sql(sql, conn)

                etf_list = []
                return_list = {}
                date_list = {}

                if is_oversea == '0':  # overseas
                    if invest_type == '안정형':
                        portfolio_data = portfolio5
                    elif invest_type == '안정추구형':
                        portfolio_data = portfolio6
                    elif invest_type == '위험중립형':
                        portfolio_data = portfolio7
                    elif invest_type == '적극투자형':
                        portfolio_data = portfolio8
                    else:
                        portfolio_data = portfolio9
                else:                  # domestic
                    if invest_type == '안정형':
                        portfolio_data = portfolio0
                    elif invest_type == '안정추구형':
                        portfolio_data = portfolio1
                    elif invest_type == '위험중립형':
                        portfolio_data = portfolio2
                    elif invest_type == '적극투자형':
                        portfolio_data = portfolio3
                    else:
                        portfolio_data = portfolio4

                for i, ticker in enumerate(portfolio_data):
                    name = return_df[return_df['ticker'] == ticker]['name'].unique().tolist()[0]
                    if name not in etf_list:
                        etf_list.append(name)

                    return_list[i] = list(return_df[return_df['ticker'] == ticker]['return'].map(float).values)
                    date_list[i] = list(return_df[return_df['ticker'] == ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())

                # fetch portfolio return data
                if is_oversea == '0':  # overseas
                    sql = "select * from pf_us"
                    pf_df = pd.read_sql(sql, conn)
                    pf_df = pf_df[46:]
                else:                  # domestic
                    sql = "select * from pf_kr"
                    pf_df = pd.read_sql(sql, conn)
                    pf_df = pf_df[140:]

                pf_list = pf_df[invest_type].map(float).tolist()

                bt_data = []
                for i, pf in enumerate(pf_list):
                    bt_data.append({'x': i, 'y': pf})

            except Exception as e:
                print(e)

            # count risk grades (used to show the weights in the pie chart)
            count_list = [0, 0, 0]

            for risk_type in risk_list:
                if risk_type == '위험':
                    count_list[0] += 1
                elif risk_type == '중립':
                    count_list[1] += 1
                else:
                    count_list[2] += 1

        return render_template('portfolio.html', KEY_PRICE=price, KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
                               KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list, KEY_COUNT_LIST=count_list,
                               KEY_RETURN_1Y=returns_1y, KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
                               KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list, KEY_DATE_LIST=date_list,
                               KEY_BACKTESTING=bt_data)

if __name__ == '__main__':
    app.run(debug=True)
normal
{ "blob_id": "74aa93bf3731d4e3ddb920bedc7daced50b4f2c3", "index": 1565, "step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\[email protected]('/portfolio', methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n portfolio0 = ['195930', '133690', '273130', '284430', '183700']\n portfolio1 = ['195930', '133690', '239660', '284430', '183700']\n portfolio2 = ['195930', '133690', '239660', '278620', '284430']\n portfolio3 = ['195930', '278530', '133690', '239660', '284430']\n portfolio4 = ['195930', '278530', '277630', '133690', '284430']\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n db = ''\n if is_oversea == '0':\n db = 'ETF_US'\n else:\n db = 'ETF_KR'\n 
print(db)\n with oracle_engine.connect() as conn:\n try:\n sql = 'select * from ' + db + ' where invest_type=:1'\n results = conn.execute(sql, invest_type).fetchall()\n name_list = []\n risk_list = []\n weight_list = []\n returns_1y = []\n returns_3y = []\n returns_5y = []\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n sql = 'select * from RETURN'\n return_df = pd.read_sql(sql, conn)\n etf_list = []\n return_list = {}\n date_list = {}\n if is_oversea == '0':\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n elif invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == '적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'\n ].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n return_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['return'].map(float).values)\n date_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n if is_oversea == '0':\n sql = 'select * from pf_us'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else:\n sql = 'select * from pf_kr'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n pf_list = pf_df[invest_type].map(float).tolist()\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf})\n except Exception as e:\n print(e)\n count_list = [0, 0, 0]\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n return render_template('portfolio.html', KEY_PRICE=price,\n KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,\n KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,\n KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,\n KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\noracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = 
joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\[email protected]('/portfolio', methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n portfolio0 = ['195930', '133690', '273130', '284430', '183700']\n portfolio1 = ['195930', '133690', '239660', '284430', '183700']\n portfolio2 = ['195930', '133690', '239660', '278620', '284430']\n portfolio3 = ['195930', '278530', '133690', '239660', '284430']\n portfolio4 = ['195930', '278530', '277630', '133690', '284430']\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n db = ''\n if is_oversea == '0':\n db = 'ETF_US'\n else:\n db = 'ETF_KR'\n print(db)\n with oracle_engine.connect() as conn:\n try:\n sql = 'select * from ' + db + ' where invest_type=:1'\n results = conn.execute(sql, invest_type).fetchall()\n name_list = []\n risk_list = []\n weight_list = []\n returns_1y = []\n returns_3y = []\n returns_5y = []\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n sql = 'select * from RETURN'\n return_df = pd.read_sql(sql, conn)\n etf_list = []\n return_list = {}\n date_list = {}\n if is_oversea == '0':\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n elif invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == '적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'\n ].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n return_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['return'].map(float).values)\n date_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n if is_oversea == '0':\n sql = 'select * from pf_us'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else:\n sql = 'select * from pf_kr'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n pf_list = pf_df[invest_type].map(float).tolist()\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf})\n except Exception as e:\n print(e)\n count_list = [0, 0, 0]\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n return render_template('portfolio.html', KEY_PRICE=price,\n KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, 
KEY_WEIGHT_LIST=weight_list,\n KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,\n KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,\n KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "import json\nimport joblib\nimport numpy as np\nimport datetime\nimport sqlalchemy as sa\nimport cx_Oracle\nimport pandas as pd\nfrom flask import Flask, render_template, session, request, redirect, url_for\napp = Flask(__name__)\noracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\[email protected]('/portfolio', methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n portfolio0 = ['195930', '133690', '273130', '284430', '183700']\n portfolio1 = ['195930', '133690', '239660', '284430', '183700']\n portfolio2 = ['195930', '133690', '239660', '278620', '284430']\n portfolio3 = ['195930', '278530', '133690', '239660', '284430']\n portfolio4 = ['195930', '278530', '277630', '133690', '284430']\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n db = ''\n if is_oversea == '0':\n db = 'ETF_US'\n else:\n db = 'ETF_KR'\n print(db)\n with oracle_engine.connect() as conn:\n try:\n sql = 'select * from ' + db + ' where invest_type=:1'\n results = conn.execute(sql, invest_type).fetchall()\n name_list = []\n risk_list = []\n weight_list = []\n returns_1y = []\n returns_3y = []\n returns_5y = []\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n sql = 'select * from RETURN'\n return_df = pd.read_sql(sql, conn)\n etf_list = []\n return_list = {}\n date_list = {}\n if is_oversea == '0':\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n 
portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n elif invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == '적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'\n ].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n return_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['return'].map(float).values)\n date_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n if is_oversea == '0':\n sql = 'select * from pf_us'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else:\n sql = 'select * from pf_kr'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n pf_list = pf_df[invest_type].map(float).tolist()\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf})\n except Exception as e:\n print(e)\n count_list = [0, 0, 0]\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n return render_template('portfolio.html', KEY_PRICE=price,\n KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,\n KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,\n KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,\n KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "import json\nimport joblib\nimport numpy as np\nimport datetime\nimport sqlalchemy as sa\nimport cx_Oracle\nimport pandas as pd\n\nfrom flask import Flask, render_template, session, request, redirect, url_for\n\napp = Flask(__name__)\noracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\[email protected](\"/survey\", methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2, s3, s4, s5]\n i_list = list(map(int, i_list)) # str -> int\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n\n if type_num == 0:\n invest_type = \"안정추구형\"\n elif type_num == 1:\n invest_type = \"안정형\"\n elif type_num == 2:\n invest_type = \"적극투자형\"\n elif type_num == 3:\n invest_type = \"공격투자형\"\n else:\n invest_type = \"위험중립형\"\n\n return render_template('result.html', KEY_INVEST_TYPE=invest_type, IS_OVERSEA=is_oversea)\n\[email protected](\"/portfolio\", methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n # 국내\n portfolio0 = ['195930', '133690', '273130', '284430', '183700'] # 
안정형\n portfolio1 = ['195930', '133690', '239660', '284430', '183700'] # 안정추구형\n portfolio2 = ['195930', '133690', '239660', '278620', '284430'] # 위험중립형\n portfolio3 = ['195930', '278530', '133690', '239660', '284430'] # 적극투자형\n portfolio4 = ['195930', '278530', '277630', '133690', '284430'] # 공격투자형\n\n # 미국\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ'] # 안정형\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL'] # 안정추구형\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK'] # 위험중립형\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK'] # 적극투자형\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK'] # 공격투자형\n\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n\n db = \"\"\n\n if is_oversea == '0': # 해외 ETF\n db = \"ETF_US\"\n else: # 국내 ETF\n db = \"ETF_KR\"\n\n print(db)\n\n with oracle_engine.connect() as conn:\n try:\n sql = \"select * from \" + db + \" where invest_type=:1\"\n results = conn.execute(sql, (invest_type)).fetchall()\n\n name_list = [] # 상품명\n risk_list = [] # 위험등급\n weight_list = [] # 가중치\n returns_1y = [] # 1년 수익률\n returns_3y = [] # 3년 수익률\n returns_5y = [] # 5년 수익률\n\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n\n # 투자성향 상품별 과거 수익률 데이터 가져오기\n sql = \"select * from RETURN\"\n return_df = pd.read_sql(sql, conn)\n\n etf_list = []\n return_list = {}\n date_list = {}\n\n if is_oversea == '0': # 해외\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n else:\n if invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == '적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n\n return_list[i] = list(return_df[return_df['ticker'] == ticker]['return'].map(float).values)\n date_list[i] = list(\n return_df[return_df['ticker'] == ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n\n # 포트폴리오 수익률 데이터 가져오기\n if is_oversea == '0': # 해외\n sql = \"select * from pf_us\"\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else: # 국내\n sql = \"select * from pf_kr\"\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n\n pf_list = pf_df[invest_type].map(float).tolist()\n\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf});\n\n except Exception as e:\n print(e)\n\n # 투자 등급 카운팅 (파이차트에 비중 나타내기 위해 사용)\n count_list = [0,0,0]\n\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n\n return render_template('portfolio.html', KEY_PRICE=price, KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list, KEY_COUNT_LIST=count_list,\n KEY_RETURN_1Y=returns_1y, KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list, KEY_DATE_LIST=date_list,\n 
KEY_BACKTESTING=bt_data)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
# #1
# def bi_search(l, r, arr, x):
#     # Code Here
#     if(l == r):
#         return arr[r] == x

#     mid = (l + r)//2 + 1
#     if(arr[mid] > x):
#         return bi_search(l,mid-1,arr,x)
#     else:
#         return bi_search(mid,r,arr,x)

# inp = input('Enter Input : ').split('/')
# arr, k = list(map(int, inp[0].split())), int(inp[1])
# print(bi_search(0, len(arr) - 1, sorted(arr), k))

# #2
# def bi_search(l, r, arr, x):
#     if(l == r):
#         if arr[l] > x :
#             return arr[l]
#         else:
#             return None

#     mid = (l + r)//2 + 1
#     res = None
#     if(arr[mid] > x):
#         res = bi_search(l,mid-1,arr,x)
#     else:
#         res = bi_search(mid,r,arr,x)
#     return res if res else (arr[mid] if arr[mid] > x else None)

# inp = input('Enter Input : ').split('/')
# arr, arr2 = sorted(list(map(int, inp[0].split()))), list(map(int, inp[1].split()))
# for k in arr2:
#     res = bi_search(0, len(arr) - 1, arr, k)
#     print(res if res else "No First Greater Value")

#3
# class Data:
#     def __init__(self, key, value):
#         self.key = key
#         self.value = value

#     def __str__(self):
#         return "({0}, {1})".format(self.key, self.value)

# class hash:

#     def __init__(self,max,chain):
#         self.data = [None for i in range(max)]
#         self.limit= max
#         self.chain= chain
#         self.length = 0

#     def code(self,a):
#         return sum([ord(i) for i in a])

#     def isFull(self):
#         return self.length == self.limit

#     def insert(self,value):
#         key,val = value.split(" ")
#         s = self.code(key)
#         co = 0
#         now = 0
#         while(co <= self.chain):
#             if(co != 0):
#                 print ("collision number",co,"at",now)
#                 if(co == self.chain):
#                     break
#             now = (s + (0 if not co else co*co) ) % self.limit

#             if(self.data[now] == None):
#                 self.data[now] = Data(key,val)
#                 self.length += 1
#                 break
#             co += 1

#         if(co >= self.chain):
#             print("Max of collisionChain")

#     def __str__(self):
#         return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n---------------------------"

# print(" ***** Fun with hashing *****")

# val,arr = input("Enter Input : ").split("/")

# h = hash(int(val.split(" ")[0]),int(val.split(" ")[1]))

# arr = arr.split(",")

# for i in arr:
#     h.insert(i)
#     print(h)
#     if(h.isFull()):
#         print("This table is full !!!!!!")
#         break

#4
# import math
# class Data:
#     def __init__(self, value):
#         self.value = value

#     def __str__(self):
#         return str(self.value)

# class hash:

#     def __init__(self,max,chain,t):
#         self.data = [None for i in range(max)]
#         self.limit = max
#         self.chain = chain
#         self.length = 0
#         self.threshold = t
#         self.bu = list()

#     def code(self,a):
#         # return sum([ord(i) for i in a])
#         return int(a)

#     def isFull(self):
#         return self.length == self.limit

#     def findNearPrime(self):
#         i = self.limit * 2
#         while(True):
#             c = True
#             for j in range(2, int(math.sqrt(i)) + 1):
#                 if(not i % j):
#                     i += 1
#                     c = False
#                     break
#             if c :
#                 break

#         return i

#     def handlerIllegal(self,co,value):
#         if(self.length * 100 // self.limit >= self.threshold):
#             print("****** Data over threshold - Rehash !!! ******")
#             self.resize()
#             self.Rehash()
#         elif (co >= self.chain):
#             print("****** Max collision - Rehash !!! ******")
#             self.resize()
#             self.Rehash()

#     def resize(self):
#         self.data += [None for i in range(self.findNearPrime() - self.limit)]
#         self.limit = len(self.data)

#     def Rehash(self):
#         for i in range(self.limit):
#             self.data[i] = None
#         for i in self.bu:
#             self.insert(i,False)

#     def insert(self,value,Rehash = True):
#         s = self.code(value)
#         co = 0
#         now = 0
#         while(co <= self.chain):
#             if(co != 0):
#                 print ("collision number",co,"at",now)
#                 if(co == self.chain):
#                     break
#             now = (s + (0 if not co else co*co) ) % self.limit

#             if(self.data[now] == None):
#                 self.data[now] = Data(value)
#                 if(Rehash):
#                     self.length += 1
#                 break
#             co += 1

#         if(Rehash):
#             self.handlerIllegal(co,value)

#     def addBuff(self,value):
#         self.bu.append(value)

#     def __str__(self):
#         return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n----------------------------------------"

# print(" ***** Rehashing *****")

# val,arr = input("Enter Input : ").split("/")

# h = hash(int(val.split(" ")[0]),int(val.split(" ")[1]),int(val.split(" ")[2]))

# arr = arr.split()

# print("Initial Table :",h,sep="\n")

# for i in arr:
#     print("Add :",i)
#     h.addBuff(i)
#     h.insert(i)
#     print(h)
#     if(h.isFull()):
#         print("This table is full !!!!!!")
#         break

# 5
boxes = 0
ans = -1
def solve(dpArr,list,box,i):
    global boxes
    global ans
    if(box == boxes):
        s = 0
        for j in list:
            s += len(j)

        if(s == len(dpArr)):
            mx = 0
            for j in list:
                if(sum(j) > mx):
                    mx = sum(j)

            if(mx < ans or ans == -1):
                ans = mx
        return

    for j in range(1,len(dpArr) + 1):
        if ( i + j > len(dpArr) ):
            break
        solve(dpArr,list + [dpArr[i:i + j]],box + 1 ,i + j)


inp = input("Enter Input : ")

inp,boxes = list(map(int,inp.split("/")[0].split() )) , int( inp.split("/")[1])

# for i in range(1,len(inp)):
#     inp[i] += inp[i-1]

solve(dpArr = inp,list = [],box = 0,i = 0)
print("Minimum weigth for",boxes,"box(es) =",ans)
normal
{ "blob_id": "883b4de18dddede97f850e3a184a0e1072bda99e", "index": 814, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\n<mask token>\nsolve(dpArr=inp, list=[], box=0, i=0)\nprint('Minimum weigth for', boxes, 'box(es) =', ans)\n", "step-4": "boxes = 0\nans = -1\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\ninp = input('Enter Input : ')\ninp, boxes = list(map(int, inp.split('/')[0].split())), int(inp.split('/')[1])\nsolve(dpArr=inp, list=[], box=0, i=0)\nprint('Minimum weigth for', boxes, 'box(es) =', ans)\n", "step-5": "# #1\n# def bi_search(l, r, arr, x):\n# # Code Here\n# if(l == r):\n# return arr[r] == x\n \n# mid = (l + r)//2 + 1\n# if(arr[mid] > x):\n# return bi_search(l,mid-1,arr,x)\n# else:\n# return bi_search(mid,r,arr,x)\n\n# inp = input('Enter Input : ').split('/')\n# arr, k = list(map(int, inp[0].split())), int(inp[1])\n# print(bi_search(0, len(arr) - 1, sorted(arr), k))\n\n# #2\n# def bi_search(l, r, arr, x):\n# if(l == r):\n# if arr[l] > x :\n# return arr[l]\n# else: \n# return None\n\n# mid = (l + r)//2 + 1\n# res = None\n# if(arr[mid] > x):\n# res = bi_search(l,mid-1,arr,x)\n# else:\n# res = bi_search(mid,r,arr,x)\n# return res if res else (arr[mid] if arr[mid] > x else None)\n\n\n# inp = input('Enter Input : ').split('/')\n# arr, arr2 = sorted(list(map(int, inp[0].split()))), list(map(int, inp[1].split()))\n# for k in arr2:\n# res = bi_search(0, len(arr) - 1, arr, k) \n# print(res if res else \"No First Greater Value\")\n\n#3\n# class Data:\n# def __init__(self, key, value):\n# self.key = key\n# self.value = value\n\n# def __str__(self):\n# return \"({0}, {1})\".format(self.key, self.value)\n\n# class hash:\n\n# def __init__(self,max,chain):\n# self.data = [None for i in range(max)]\n# self.limit= max\n# self.chain= chain\n# self.length = 0\n\n# def code(self,a):\n# return sum([ord(i) for i in a]) \n\n# def isFull(self):\n# return self.length == self.limit\n\n# def insert(self,value):\n# key,val = value.split(\" \")\n# s = self.code(key)\n# co = 0\n# now = 0\n# while(co <= self.chain):\n# if(co != 0):\n# print (\"collision number\",co,\"at\",now)\n# if(co == self.chain):\n# break\n# now = (s + (0 if not co else co*co) ) % self.limit \n \n\n# if(self.data[now] == None):\n# self.data[now] = Data(key,val)\n# self.length += 1\n# break\n# co += 1\n\n# if(co >= self.chain):\n# print(\"Max of collisionChain\")\n\n\n# def __str__(self):\n# return 
\"\\n\".join(list(map(str,[ \"#{0}\t{1}\".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + \"\\n---------------------------\"\n\n\n# print(\" ***** Fun with hashing *****\")\n\n# val,arr = input(\"Enter Input : \").split(\"/\")\n\n# h = hash(int(val.split(\" \")[0]),int(val.split(\" \")[1]))\n\n# arr = arr.split(\",\")\n\n# for i in arr:\n# h.insert(i)\n# print(h)\n# if(h.isFull()):\n# print(\"This table is full !!!!!!\")\n# break\n\n\n#4\n# import math\n# class Data:\n# def __init__(self, value):\n# self.value = value\n\n# def __str__(self):\n# return str(self.value)\n\n# class hash:\n\n# def __init__(self,max,chain,t):\n# self.data = [None for i in range(max)]\n# self.limit = max\n# self.chain = chain\n# self.length = 0\n# self.threshold = t\n# self.bu = list()\n\n# def code(self,a):\n# # return sum([ord(i) for i in a]) \n# return int(a)\n\n# def isFull(self):\n# return self.length == self.limit\n\n# def findNearPrime(self):\n# i = self.limit * 2\n# while(True):\n# c = True\n# for j in range(2, int(math.sqrt(i)) + 1):\n# if(not i % j):\n# i += 1\n# c = False\n# break\n# if c :\n# break\n\n# return i\n\n# def handlerIllegal(self,co,value):\n# if(self.length * 100 // self.limit >= self.threshold):\n# print(\"****** Data over threshold - Rehash !!! ******\")\n# self.resize()\n# self.Rehash()\n# elif (co >= self.chain):\n# print(\"****** Max collision - Rehash !!! ******\")\n# self.resize()\n# self.Rehash()\n\n# def resize(self):\n# self.data += [None for i in range(self.findNearPrime() - self.limit)]\n# self.limit = len(self.data)\n\n# def Rehash(self):\n# for i in range(self.limit):\n# self.data[i] = None\n# for i in self.bu:\n# self.insert(i,False)\n\n# def insert(self,value,Rehash = True):\n# s = self.code(value)\n# co = 0\n# now = 0\n# while(co <= self.chain):\n# if(co != 0):\n# print (\"collision number\",co,\"at\",now)\n# if(co == self.chain):\n# break\n# now = (s + (0 if not co else co*co) ) % self.limit \n\n# if(self.data[now] == None):\n# self.data[now] = Data(value)\n# if(Rehash):\n# self.length += 1\n# break\n# co += 1\n\n# if(Rehash):\n# self.handlerIllegal(co,value)\n\n# def addBuff(self,value):\n# self.bu.append(value)\n\n# def __str__(self):\n# return \"\\n\".join(list(map(str,[ \"#{0}\t{1}\".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + \"\\n----------------------------------------\"\n\n\n# print(\" ***** Rehashing *****\")\n\n# val,arr = input(\"Enter Input : \").split(\"/\")\n\n# h = hash(int(val.split(\" \")[0]),int(val.split(\" \")[1]),int(val.split(\" \")[2]))\n\n# arr = arr.split()\n\n# print(\"Initial Table :\",h,sep=\"\\n\")\n\n# for i in arr:\n# print(\"Add :\",i)\n# h.addBuff(i)\n# h.insert(i)\n# print(h)\n# if(h.isFull()):\n# print(\"This table is full !!!!!!\")\n# break\n\n\n# 5\nboxes = 0\nans = -1\ndef solve(dpArr,list,box,i):\n global boxes \n global ans\n if(box == boxes):\n s = 0\n for j in list:\n s += len(j)\n \n if(s == len(dpArr)):\n mx = 0\n for j in list:\n if(sum(j) > mx):\n mx = sum(j)\n\n if(mx < ans or ans == -1):\n ans = mx \n return\n\n for j in range(1,len(dpArr) + 1):\n if ( i + j > len(dpArr) ):\n break\n solve(dpArr,list + [dpArr[i:i + j]],box + 1 ,i + j)\n\n\ninp = input(\"Enter Input : \")\n\ninp,boxes = list(map(int,inp.split(\"/\")[0].split() )) , int( inp.split(\"/\")[1])\n\n# for i in range(1,len(inp)):\n# inp[i] += inp[i-1]\n\nsolve(dpArr = inp,list = [],box = 0,i = 0)\nprint(\"Minimum weigth for\",boxes,\"box(es) =\",ans)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
''' Aluno: Lucas Airam Castro de Souza Resumo: Programa para calcular a raiz com a precisão n de casas decimais def raiz(numero, casas_decimais=0): if ((numero == 0) or (numero == 1)): return "O resultado eh: " + str(numero) elif (numero<0): return "A raiz nao existe no conjunto real" else: resultado = [0] if (casas_decimais == 0): while True: if ((resultado[0]+1)**2<numero): resultado[0]+=1 else: return resultado[0] else: for cont in range(casas_decimais): resultado+=[0] primeiro_numero=0 while True: if ((primeiro_numero+1)**2<numero): primeiro_numero+=1 else: resultado[0]=str(primeiro_numero) casas_corretas = 1 while (casas_corretas < len(resultado)): cont1=0 while ((cont1 < 10) and (cont1 < len(resultado))): numero_parcial = "" print resultado for cont2 in range(casas_corretas): numero_parcial+=str(resultado[cont2]) print resultado print resultado [1] print resultado[cont2] if ((int(numero_parcial)+1)**2<numero): cont3 = int(resultado[casas_corretas])+1 resultado[casas_corretas]=str(cont3) else: resultado[casas_corretas]=str(cont1) casas_corretas+=1 cont1+=1 resultado_final = "" for cont4 in range(casas_corretas): resultado_final+=resultado[cont4] return int(resultado_final) ''' def raiz(numero): casas_decimais=18 if ((numero == 0) or (numero == 1)): return "O resultado eh: " + str(numero) elif (numero<0): return "A raiz nao existe no conjunto real" else: posicao = 0 casa_decimal = 10**posicao resultado_parcial = 0.0 while (-posicao != casas_decimais+1): # print 'resultado: ' +str(resultado_parcial) # print 'casa decimal: ' +str(casa_decimal) # print 'posicao: ' +str(posicao) if ((resultado_parcial+casa_decimal+(casa_decimal*1))**2 < numero): casa_decimal+=(10**posicao)*1 resultado_parcial+=casa_decimal else: posicao-=1 casa_decimal=10**posicao return resultado_parcial
normal
{ "blob_id": "5c174dd514d0a7d9aa932fcb436f22d9a44d2327", "index": 1486, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef raiz(numero):\n casas_decimais = 18\n if numero == 0 or numero == 1:\n return 'O resultado eh: ' + str(numero)\n elif numero < 0:\n return 'A raiz nao existe no conjunto real'\n else:\n posicao = 0\n casa_decimal = 10 ** posicao\n resultado_parcial = 0.0\n while -posicao != casas_decimais + 1:\n if (resultado_parcial + casa_decimal + casa_decimal * 1\n ) ** 2 < numero:\n casa_decimal += 10 ** posicao * 1\n resultado_parcial += casa_decimal\n else:\n posicao -= 1\n casa_decimal = 10 ** posicao\n return resultado_parcial\n", "step-3": "'''\r\n\r\nAluno: Lucas Airam Castro de Souza\r\nResumo: Programa para calcular a raiz com a precisão n de casas decimais\r\n\r\n\r\ndef raiz(numero, casas_decimais=0):\r\n if ((numero == 0) or (numero == 1)):\r\n return \"O resultado eh: \" + str(numero)\r\n elif (numero<0):\r\n return \"A raiz nao existe no conjunto real\"\r\n else: \r\n resultado = [0]\r\n if (casas_decimais == 0):\r\n while True:\r\n if ((resultado[0]+1)**2<numero):\r\n resultado[0]+=1\r\n else:\r\n return resultado[0]\r\n else:\r\n for cont in range(casas_decimais):\r\n resultado+=[0]\r\n primeiro_numero=0\r\n while True:\r\n if ((primeiro_numero+1)**2<numero):\r\n primeiro_numero+=1\r\n else:\r\n resultado[0]=str(primeiro_numero)\r\n casas_corretas = 1\r\n while (casas_corretas < len(resultado)): \r\n cont1=0\r\n while ((cont1 < 10) and (cont1 < len(resultado))):\r\n numero_parcial = \"\"\r\n print resultado\r\n for cont2 in range(casas_corretas):\r\n numero_parcial+=str(resultado[cont2])\r\n print resultado\r\n print resultado [1]\r\n print resultado[cont2]\r\n if ((int(numero_parcial)+1)**2<numero):\r\n cont3 = int(resultado[casas_corretas])+1\r\n resultado[casas_corretas]=str(cont3)\r\n else:\r\n resultado[casas_corretas]=str(cont1)\r\n casas_corretas+=1\r\n cont1+=1\r\n resultado_final = \"\"\r\n for cont4 in range(casas_corretas):\r\n resultado_final+=resultado[cont4] \r\n return int(resultado_final)\r\n \r\n \r\n'''\r\ndef raiz(numero):\r\n casas_decimais=18\r\n if ((numero == 0) or (numero == 1)):\r\n return \"O resultado eh: \" + str(numero)\r\n elif (numero<0):\r\n return \"A raiz nao existe no conjunto real\"\r\n else:\r\n posicao = 0\r\n casa_decimal = 10**posicao\r\n resultado_parcial = 0.0\r\n while (-posicao != casas_decimais+1):\r\n # print 'resultado: ' +str(resultado_parcial)\r\n # print 'casa decimal: ' +str(casa_decimal)\r\n # print 'posicao: ' +str(posicao)\r\n if ((resultado_parcial+casa_decimal+(casa_decimal*1))**2 < numero):\r\n casa_decimal+=(10**posicao)*1\r\n resultado_parcial+=casa_decimal\r\n else:\r\n posicao-=1\r\n casa_decimal=10**posicao\r\n return resultado_parcial\r\n \r\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django.db import models
from django.utils.translation import ugettext_lazy as _


class Especialidade(models.Model):
    # unique=True prevents duplicate specialty names.
    nome = models.CharField(max_length=200, verbose_name=_('Especialidade'), unique=True, blank=False, null=False)

    def __str__(self):
        return self.nome
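# A minimal usage sketch for the model above (e.g. from `python manage.py shell`);
# the specialty name used here is only an illustrative value, not part of the app.
#
#   obj, created = Especialidade.objects.get_or_create(nome='Cardiologia')
#   print(obj, created)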
normal
{ "blob_id": "9cc672702d960088f0230cbd1694b295216d8b5a", "index": 4617, "step-1": "<mask token>\n\n\nclass Especialidade(models.Model):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),\n unique=True, blank=False, null=False)\n", "step-4": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),\n unique=True, blank=False, null=False)\n", "step-5": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Especialidade(models.Model):\n def __str__(self):\n return self.nome\n\n # add unique=True?\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'), unique=True, blank=False, null=False)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import time import threading lock_a = threading.Lock() lock_b = threading.Lock() def task1(): print('Task 1 is starting...') print('Task 1 is waiting to acquire Lock A') with lock_a: print('Task 1 has acquired Lock A') print('Task 1 is doing some calculations') time.sleep(2) print('Task 1 is waiting to acquire Lock B') with lock_b: print('Task 1 has acquired Lock B') print('Task 1 is doing some calculations') time.sleep(2) print('Task 1 is releasing both locks') def task2(): print('Task 2 is starting...') print('Task 2 is waiting to acquire Lock B') with lock_b: print('Task 2 has acquired Lock B') print('Task 2 is doing some calculations') time.sleep(5) print('Task 2 is waiting to acquire Lock A') with lock_a: print('Task 2 has acquired Lock A') print('Task 2 is doing some calculations') time.sleep(5) print('Task 2 is releasing both locks') if __name__ == '__main__': t1 = threading.Thread(target=task1) t2 = threading.Thread(target=task2) t1.start() t2.start() t1.join() t2.join()
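# task1 and task2 above acquire lock_a and lock_b in opposite orders, so the two
# threads can block each other forever (a classic deadlock). One common fix is to
# always take the locks in the same global order; the variant below is a sketch of
# that idea reusing the names defined above, not part of the original example.
def task2_ordered():
    print('Task 2 (ordered) is starting...')
    with lock_a:        # same order as task1: lock_a first ...
        with lock_b:    # ... then lock_b, so no circular wait is possible
            print('Task 2 (ordered) is doing some calculations')
            time.sleep(5)
    print('Task 2 (ordered) has released both locks')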
normal
{ "blob_id": "c7d8a67587a6ca01c23ed922faabbaca8bbaf337", "index": 6307, "step-1": "<mask token>\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=task1)\n t2 = threading.Thread(target=task2)\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n", "step-3": "<mask token>\nlock_a = threading.Lock()\nlock_b = threading.Lock()\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=task1)\n t2 = threading.Thread(target=task2)\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n", "step-4": "import time\nimport threading\nlock_a = threading.Lock()\nlock_b = threading.Lock()\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef 
task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=task1)\n t2 = threading.Thread(target=task2)\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
# TUPLES ARE IMMUTABLE
# AND USUALLY HETEROGENEOUS

# tuple with 1 or 0 elements
#
# empty = ()
# singleton = 'breno',
# print(type(empty))
# print(singleton)

# tuples can be nested
# t = 12345, 54321, 'hello!'
# u = t, (1, 2, 3, 4, 5)

# immutable: item assignment raises TypeError
# t[0] = 88888
normal
{ "blob_id": "34e902fbced13629657494eedfe385d3b5ae3f55", "index": 2489, "step-1": "# TUPLE IMUTAVEL\n# GERALMENTE HETEORGENEA\n\n# tupla com 1 ou 0 elementos\n#\n# empty = ()\n# singleton = 'breno',\n# print(type(empty))\n# print(singleton)\n\n# tuplas podem ser aninhadas\n# t = 12345, 54321, 'hello!'\n# u = t, (1, 2, 3, 4, 5)\n\n#imutaveis\n# t[0] = 88888", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 1 ] }
[ 1 ]
# __author__: Stanley
# date: 2018/10/22

class Foo:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __getitem__(self, item):
        return item + 10

    def __setitem__(self, key, value):
        print(key, value)

    def __delitem__(self, key):
        print(key)


obj = Foo("stnley", 25)
# obj[555] automatically calls __getitem__ on obj's class, with 555 passed as the argument
result = obj[555]
print(result)
obj[111] = 444
del obj[222]
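# Expected output of the calls above: obj[555] routes through __getitem__ and
# prints 565, the assignment prints "111 444" from __setitem__, and the del
# statement prints "222" from __delitem__.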
normal
{ "blob_id": "d4b9403366a16dfbb12a2161a996e641b3a785a5", "index": 8027, "step-1": "class Foo:\n <mask token>\n <mask token>\n\n def __setitem__(self, key, value):\n print(key, value)\n <mask token>\n\n\n<mask token>\n", "step-2": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n <mask token>\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\n<mask token>\n", "step-3": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\n<mask token>\nprint(result)\n<mask token>\ndel obj[222]\n", "step-4": "class Foo:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\nobj = Foo('stnley', 25)\nresult = obj[555]\nprint(result)\nobj[111] = 444\ndel obj[222]\n", "step-5": "# __author__: Stanley\n# date: 2018/10/22\n\nclass Foo:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __getitem__(self, item):\n return item + 10\n\n def __setitem__(self, key, value):\n print(key, value)\n\n def __delitem__(self, key):\n print(key)\n\n\nobj = Foo(\"stnley\", 25)\n# 自动执行obj对象的类中的__getitem__方法。555当作参数传递\nresult = obj[555]\nprint(result)\nobj[111] = 444\ndel obj[222]\n\n", "step-ids": [ 2, 4, 6, 7, 8 ] }
[ 2, 4, 6, 7, 8 ]
import numpy as np
import matplotlib.pyplot as plt
from qutip import mesolve, sigmax, sigmay, sigmaz, basis

# H and psi0 are not defined in the original snippet; the values below are
# assumed placeholders (a weakly driven qubit starting in |0>) so the example runs.
H = 2 * np.pi * 0.1 * sigmax()
psi0 = basis(2, 0)

times = np.linspace(0.0, 10.0, 100)
result = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(), sigmay()])
fig, ax = plt.subplots()
ax.plot(times, result.expect[0]) # doctest: +SKIP
ax.plot(times, result.expect[1]) # doctest: +SKIP
ax.set_xlabel('Time') # doctest: +SKIP
ax.set_ylabel('Expectation values') # doctest: +SKIP
ax.legend(("Sigma-Z", "Sigma-Y")) # doctest: +SKIP
plt.show() # doctest: +SKIP
normal
{ "blob_id": "8474205d49aef2d18755fc1a25a82718962f4120", "index": 6912, "step-1": "<mask token>\n", "step-2": "<mask token>\nax.plot(times, result.expect[0])\nax.plot(times, result.expect[1])\nax.set_xlabel('Time')\nax.set_ylabel('Expectation values')\nax.legend(('Sigma-Z', 'Sigma-Y'))\nplt.show()\n", "step-3": "times = np.linspace(0.0, 10.0, 100)\nresult = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(),\n sigmay()])\nfig, ax = plt.subplots()\nax.plot(times, result.expect[0])\nax.plot(times, result.expect[1])\nax.set_xlabel('Time')\nax.set_ylabel('Expectation values')\nax.legend(('Sigma-Z', 'Sigma-Y'))\nplt.show()\n", "step-4": "times = np.linspace(0.0, 10.0, 100)\nresult = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(), sigmay()])\nfig, ax = plt.subplots()\nax.plot(times, result.expect[0]) # doctest: +SKIP\nax.plot(times, result.expect[1]) # doctest: +SKIP\nax.set_xlabel('Time') # doctest: +SKIP\nax.set_ylabel('Expectation values') # doctest: +SKIP\nax.legend((\"Sigma-Z\", \"Sigma-Y\")) # doctest: +SKIP\nplt.show() # doctest: +SKIP\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python # Copyright (c) 2018, University of Stuttgart # All rights reserved. # # Permission to use, copy, modify, and distribute this software for any purpose # with or without fee is hereby granted, provided that the above copyright # notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, # INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. # # Jim Mainprice on Sunday June 13 2018 import demos_common_imports from pyrieef.geometry.workspace import * from pyrieef.geometry.pixel_map import sdf from pyrieef.rendering.workspace_planar import WorkspaceDrawer env = EnvBox(dim=np.array([2., 2.])) box = Box(origin=np.array([-.2, -.2])) segment = Segment(origin=np.array([.4, -.1]), orientation=0.2) circle = Circle(origin=np.array([.5, .5]), radius=0.2) workspace = Workspace(env) workspace.obstacles.append(box) workspace.obstacles.append(segment) workspace.obstacles.append(circle) # Compute Occupancy map and SDF nb_points = 20 occupancy_map = occupancy_map(nb_points, workspace) signed_distance_field = sdf(occupancy_map) # Setup viewer viewer = WorkspaceDrawer(workspace, wait_for_keyboard=True) viewer.draw_ws_img(signed_distance_field) # viewer.draw_ws_img(occupancy_map) # import cv2 # Draw blured image # viewer.draw_ws_img( # ndimage.gaussian_filter( # cv2.resize(src=signed_distance_field, # dsize=(300, 300), # interpolation=cv2.INTER_NEAREST), sigma=3)) viewer.draw_ws_obstacles() viewer.show_once()
normal
{ "blob_id": "0d6177660a9b9c22bcf6eb11763e7fe1ee03b46a", "index": 3454, "step-1": "<mask token>\n", "step-2": "<mask token>\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\n<mask token>\nviewer.draw_ws_img(signed_distance_field)\nviewer.draw_ws_obstacles()\nviewer.show_once()\n", "step-3": "<mask token>\nenv = EnvBox(dim=np.array([2.0, 2.0]))\nbox = Box(origin=np.array([-0.2, -0.2]))\nsegment = Segment(origin=np.array([0.4, -0.1]), orientation=0.2)\ncircle = Circle(origin=np.array([0.5, 0.5]), radius=0.2)\nworkspace = Workspace(env)\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\nnb_points = 20\noccupancy_map = occupancy_map(nb_points, workspace)\nsigned_distance_field = sdf(occupancy_map)\nviewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)\nviewer.draw_ws_img(signed_distance_field)\nviewer.draw_ws_obstacles()\nviewer.show_once()\n", "step-4": "import demos_common_imports\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry.pixel_map import sdf\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\nenv = EnvBox(dim=np.array([2.0, 2.0]))\nbox = Box(origin=np.array([-0.2, -0.2]))\nsegment = Segment(origin=np.array([0.4, -0.1]), orientation=0.2)\ncircle = Circle(origin=np.array([0.5, 0.5]), radius=0.2)\nworkspace = Workspace(env)\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\nnb_points = 20\noccupancy_map = occupancy_map(nb_points, workspace)\nsigned_distance_field = sdf(occupancy_map)\nviewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)\nviewer.draw_ws_img(signed_distance_field)\nviewer.draw_ws_obstacles()\nviewer.show_once()\n", "step-5": "#!/usr/bin/env python\n\n# Copyright (c) 2018, University of Stuttgart\n# All rights reserved.\n#\n# Permission to use, copy, modify, and distribute this software for any purpose\n# with or without fee is hereby granted, provided that the above copyright\n# notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n#\n# Jim Mainprice on Sunday June 13 2018\n\nimport demos_common_imports\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry.pixel_map import sdf\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\n\nenv = EnvBox(dim=np.array([2., 2.]))\nbox = Box(origin=np.array([-.2, -.2]))\nsegment = Segment(origin=np.array([.4, -.1]), orientation=0.2)\ncircle = Circle(origin=np.array([.5, .5]), radius=0.2)\nworkspace = Workspace(env)\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\n\n# Compute Occupancy map and SDF\nnb_points = 20\noccupancy_map = occupancy_map(nb_points, workspace)\nsigned_distance_field = sdf(occupancy_map)\n\n# Setup viewer\nviewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)\nviewer.draw_ws_img(signed_distance_field)\n# viewer.draw_ws_img(occupancy_map)\n\n# import cv2\n# Draw blured image\n# viewer.draw_ws_img(\n# ndimage.gaussian_filter(\n# cv2.resize(src=signed_distance_field,\n# dsize=(300, 300),\n# interpolation=cv2.INTER_NEAREST), sigma=3))\n\nviewer.draw_ws_obstacles()\nviewer.show_once()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from dataclasses import dataclass, field from typing import List @dataclass class Root: a: List[object] = field( default_factory=list, metadata={ "type": "Element", "namespace": "", "min_occurs": 2, "max_occurs": 4, "sequence": 1, } ) b: List[object] = field( default_factory=list, metadata={ "type": "Element", "namespace": "", "max_occurs": 2, "sequence": 1, } )
normal
{ "blob_id": "7e318ae7317eac90d6ce9a6b1d0dcc8ff65abef0", "index": 9430, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@dataclass\nclass Root:\n a: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,\n 'sequence': 1})\n b: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})\n", "step-3": "from dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass Root:\n a: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'min_occurs': 2, 'max_occurs': 4,\n 'sequence': 1})\n b: List[object] = field(default_factory=list, metadata={'type':\n 'Element', 'namespace': '', 'max_occurs': 2, 'sequence': 1})\n", "step-4": "from dataclasses import dataclass, field\nfrom typing import List\n\n\n@dataclass\nclass Root:\n a: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"min_occurs\": 2,\n \"max_occurs\": 4,\n \"sequence\": 1,\n }\n )\n b: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"max_occurs\": 2,\n \"sequence\": 1,\n }\n )\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def coroutine(func): def start_coroutine(*args, **kwargs): cr = func(*args, **kwargs) next(cr) #cr.send(None) return cr return start_coroutine @coroutine def grep(pattern): print('start grep') try: while True: line = yield if pattern in line: print(line) except GeneratorExit: print('stop grep') @coroutine def grep_python_coroutine(): g = grep('python') yield from g g = grep('python') #next(g) #g.send(None) g.send("php is better") g.send("python is simplier") g.close()
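# Expected output of the run above: the decorator primes grep() so "start grep"
# is printed on creation, only the line containing 'python' ("python is simplier")
# is echoed back, and close() raises GeneratorExit inside grep, printing "stop grep".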
normal
{ "blob_id": "bebe098c5abb579eb155a1dc325347d100ddfa8f", "index": 1805, "step-1": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n<mask token>\n", "step-2": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n<mask token>\n", "step-3": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n@coroutine\ndef grep_python_coroutine():\n g = grep('python')\n yield from g\n\n\n<mask token>\n", "step-4": "def coroutine(func):\n\n def start_coroutine(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start_coroutine\n\n\n@coroutine\ndef grep(pattern):\n print('start grep')\n try:\n while True:\n line = yield\n if pattern in line:\n print(line)\n except GeneratorExit:\n print('stop grep')\n\n\n@coroutine\ndef grep_python_coroutine():\n g = grep('python')\n yield from g\n\n\n<mask token>\ng.send('php is better')\ng.send('python is simplier')\ng.close()\n", "step-5": "def coroutine(func):\n\tdef start_coroutine(*args, **kwargs):\n\t\tcr = func(*args, **kwargs)\n\t\tnext(cr) #cr.send(None)\n\t\treturn cr\n\treturn start_coroutine\n\n@coroutine\ndef grep(pattern):\n\tprint('start grep')\n\ttry:\n\t\twhile True:\n\t\t\tline = yield\n\t\t\tif pattern in line:\n\t\t\t\tprint(line)\n\texcept GeneratorExit:\n\t\tprint('stop grep')\n\n@coroutine\ndef grep_python_coroutine():\n\tg = grep('python') \n\tyield from g\n\ng = grep('python')\n#next(g) #g.send(None)\ng.send(\"php is better\")\ng.send(\"python is simplier\")\ng.close()", "step-ids": [ 1, 2, 3, 4, 6 ] }
[ 1, 2, 3, 4, 6 ]
from datetime import datetime from app import db class Vocabulary(db.Model): _id = db.Column(db.Integer, primary_key=True) language = db.Column(db.String(64), index=True) word = db.Column(db.String(64), index=True, unique=True) date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
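# A minimal usage sketch for the model above, assuming the usual Flask-SQLAlchemy
# setup in `app` (an application context plus `db.create_all()`); the word added
# here is purely illustrative.
#
#   db.session.add(Vocabulary(language='en', word='serendipity'))
#   db.session.commit()
#   print(Vocabulary.query.filter_by(language='en').count())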
normal
{ "blob_id": "834469f9c6e065fb29dfe1fd3e421fbb752f5094", "index": 7708, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Vocabulary(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Vocabulary(db.Model):\n _id = db.Column(db.Integer, primary_key=True)\n language = db.Column(db.String(64), index=True)\n word = db.Column(db.String(64), index=True, unique=True)\n date = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n", "step-4": "from datetime import datetime\nfrom app import db\n\n\nclass Vocabulary(db.Model):\n _id = db.Column(db.Integer, primary_key=True)\n language = db.Column(db.String(64), index=True)\n word = db.Column(db.String(64), index=True, unique=True)\n date = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from pyramid.request import Request from pyramid.response import Response from pyramid.view import view_config from svc1_first_auto_service.data.repository import Repository @view_config(route_name='autos_api', request_method='GET', renderer='json') def all_autos(_): cars = Repository.all_cars(limit=25) return cars @view_config(route_name='auto_api', request_method='GET', renderer='json') def single_auto(request: Request): car_id = request.matchdict.get('car_id') car = Repository.car_by_id(car_id) if not car: msg = "The car with id '{}' was not found.".format(car_id) return Response(status=404, json_body={'error': msg}) return car @view_config(route_name='auto', request_method='GET', renderer='json') def auto_by_id(request: Request): cid = request.matchdict.get('cid') cid = int(cid) if cid is not None: car = Repository.car_by_cid(cid) if not car: msg = f"The car with id '{cid}' was not found." return Response(status=404, json_body={'error': msg}) return car else: msg = f"The cid is None" return Response(status=404, json_body={'error': msg})
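# The three views above are looked up by route name, so the application's
# Configurator has to register matching routes during setup. The patterns below
# are assumptions chosen to fit the matchdict keys used above ('car_id' and
# 'cid'), not the service's actual URLs.
#
#   config.add_route('autos_api', '/api/autos')
#   config.add_route('auto_api', '/api/autos/{car_id}')
#   config.add_route('auto', '/autos/{cid}')
#   config.scan()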
normal
{ "blob_id": "cb903f3f7fd3c4f3ba5f8ff2ce12aac9c680aa15", "index": 6116, "step-1": "<mask token>\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@view_config(route_name='autos_api', request_method='GET', renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@view_config(route_name='autos_api', request_method='GET', renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n@view_config(route_name='auto', request_method='GET', renderer='json')\ndef auto_by_id(request: Request):\n cid = request.matchdict.get('cid')\n cid = int(cid)\n if cid is not None:\n car = Repository.car_by_cid(cid)\n if not car:\n msg = f\"The car with id '{cid}' was not found.\"\n return Response(status=404, json_body={'error': msg})\n return car\n else:\n msg = f'The cid is None'\n return Response(status=404, json_body={'error': msg})\n", "step-4": "from pyramid.request import Request\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom svc1_first_auto_service.data.repository import Repository\n\n\n@view_config(route_name='autos_api', request_method='GET', renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n@view_config(route_name='auto', request_method='GET', renderer='json')\ndef auto_by_id(request: Request):\n cid = request.matchdict.get('cid')\n cid = int(cid)\n if cid is not None:\n car = Repository.car_by_cid(cid)\n if not car:\n msg = f\"The car with id '{cid}' was not found.\"\n return Response(status=404, json_body={'error': msg})\n return car\n else:\n msg = f'The cid is None'\n return Response(status=404, json_body={'error': msg})\n", "step-5": "from pyramid.request import Request\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\n\nfrom svc1_first_auto_service.data.repository import Repository\n\n\n@view_config(route_name='autos_api',\n request_method='GET',\n renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api',\n request_method='GET',\n renderer='json')\ndef 
single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n\n return car\n\n\n@view_config(route_name='auto',\n request_method='GET',\n renderer='json')\ndef auto_by_id(request: Request):\n cid = request.matchdict.get('cid')\n cid = int(cid)\n\n if cid is not None:\n car = Repository.car_by_cid(cid)\n if not car:\n msg = f\"The car with id '{cid}' was not found.\"\n return Response(status=404, json_body={'error': msg})\n\n return car\n else:\n msg = f\"The cid is None\"\n return Response(status=404, json_body={'error': msg})\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
""" Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once. Find all the elements that appear twice in this array. Could you do it without extra space and in O(n) runtime? Example: Input: [4,3,2,7,8,2,3,1] Output: [2,3] """ # O(n) TC and SC class Solution: def findDuplicates(self, nums: List[int]) -> List[int]: cnt = {} for num in nums: cnt[num] = cnt.get(num, 0) + 1 res = [] for k, v in cnt.items(): if v > 1: res.append(k) return res # O(n) TC and O(1) SC class Solution: def findDuplicates(self, nums: List[int]) -> List[int]: res = [] for num in nums: if nums[abs(num)-1] < 0: res.append(abs(num)) else: nums[abs(num)-1] *= -1 return res
normal
{ "blob_id": "5cfd7744f98c80483cb4dd318c17a7cd83ed3ae3", "index": 758, "step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Solution:\n\n def findDuplicates(self, nums: List[int]) ->List[int]:\n res = []\n for num in nums:\n if nums[abs(num) - 1] < 0:\n res.append(abs(num))\n else:\n nums[abs(num) - 1] *= -1\n return res\n", "step-3": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\nclass Solution:\n\n def findDuplicates(self, nums: List[int]) ->List[int]:\n res = []\n for num in nums:\n if nums[abs(num) - 1] < 0:\n res.append(abs(num))\n else:\n nums[abs(num) - 1] *= -1\n return res\n", "step-4": "<mask token>\n\n\nclass Solution:\n\n def findDuplicates(self, nums: List[int]) ->List[int]:\n cnt = {}\n for num in nums:\n cnt[num] = cnt.get(num, 0) + 1\n res = []\n for k, v in cnt.items():\n if v > 1:\n res.append(k)\n return res\n\n\nclass Solution:\n\n def findDuplicates(self, nums: List[int]) ->List[int]:\n res = []\n for num in nums:\n if nums[abs(num) - 1] < 0:\n res.append(abs(num))\n else:\n nums[abs(num) - 1] *= -1\n return res\n", "step-5": "\"\"\"\nGiven an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.\nFind all the elements that appear twice in this array.\nCould you do it without extra space and in O(n) runtime?\n\nExample:\nInput:\n[4,3,2,7,8,2,3,1]\n\nOutput:\n[2,3]\n\"\"\"\n\n# O(n) TC and SC\nclass Solution:\n def findDuplicates(self, nums: List[int]) -> List[int]:\n cnt = {}\n\n for num in nums:\n cnt[num] = cnt.get(num, 0) + 1\n\n res = []\n\n for k, v in cnt.items():\n if v > 1:\n res.append(k)\n\n return res\n\n\n# O(n) TC and O(1) SC\nclass Solution:\n def findDuplicates(self, nums: List[int]) -> List[int]:\n res = []\n \n for num in nums:\n if nums[abs(num)-1] < 0:\n res.append(abs(num))\n else:\n nums[abs(num)-1] *= -1\n \n return res\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """eval standalone script""" import os import re import argparse from mindspore import context from mindspore.train.serialization import load_checkpoint, load_param_into_net from src.dataset import create_dataset from src.config import eval_cfg, student_net_cfg, task_cfg from src.tinybert_model import BertModelCLS def parse_args(): """ parse args """ parser = argparse.ArgumentParser(description='ternarybert evaluation') parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'], help='Device where the code will be implemented. (Default: GPU)') parser.add_argument('--device_id', type=int, default=0, help='Device id. (Default: 0)') parser.add_argument('--model_dir', type=str, default='', help='The checkpoint directory of model.') parser.add_argument('--data_dir', type=str, default='', help='Data directory.') parser.add_argument('--task_name', type=str, default='sts-b', choices=['sts-b', 'qnli', 'mnli'], help='The name of the task to train. (Default: sts-b)') parser.add_argument('--dataset_type', type=str, default='tfrecord', choices=['tfrecord', 'mindrecord'], help='The name of the task to train. 
(Default: tfrecord)') parser.add_argument('--batch_size', type=int, default=32, help='Batch size for evaluating') parser.add_argument('--data_name', type=str, default='eval.tf_record', help='') return parser.parse_args() def get_ckpt(ckpt_file): lists = os.listdir(ckpt_file) lists.sort(key=lambda fn: os.path.getmtime(ckpt_file + '/' + fn)) return os.path.join(ckpt_file, lists[-1]) def do_eval_standalone(args_opt): """ do eval standalone """ ckpt_file = os.path.join(args_opt.model_dir, args_opt.task_name) ckpt_file = get_ckpt(ckpt_file) print('ckpt file:', ckpt_file) task = task_cfg[args_opt.task_name] student_net_cfg.seq_length = task.seq_length eval_cfg.batch_size = args_opt.batch_size eval_data_dir = os.path.join(args_opt.data_dir, args_opt.task_name, args_opt.data_name) context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args.device_id) eval_dataset = create_dataset(batch_size=eval_cfg.batch_size, device_num=1, rank=0, do_shuffle=False, data_dir=eval_data_dir, data_type=args_opt.dataset_type, seq_length=task.seq_length, task_type=task.task_type, drop_remainder=False) print('eval dataset size:', eval_dataset.get_dataset_size()) print('eval dataset batch size:', eval_dataset.get_batch_size()) eval_model = BertModelCLS(student_net_cfg, False, task.num_labels, 0.0, phase_type='student') param_dict = load_checkpoint(ckpt_file) new_param_dict = {} for key, value in param_dict.items(): new_key = re.sub('tinybert_', 'bert_', key) new_key = re.sub('^bert.', '', new_key) new_param_dict[new_key] = value load_param_into_net(eval_model, new_param_dict) eval_model.set_train(False) columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"] callback = task.metrics() for step, data in enumerate(eval_dataset.create_dict_iterator()): input_data = [] for i in columns_list: input_data.append(data[i]) input_ids, input_mask, token_type_id, label_ids = input_data _, _, logits, _ = eval_model(input_ids, token_type_id, input_mask) callback.update(logits, label_ids) print('eval step: {}, {}: {}'.format(step, callback.name, callback.get_metrics())) metrics = callback.get_metrics() print('The best {}: {}'.format(callback.name, metrics)) if __name__ == '__main__': args = parse_args() do_eval_standalone(args)
normal
{ "blob_id": "883d2efeb6d7d43cf82eef2e0397110fd8e3ea03", "index": 4368, "step-1": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n parse args\n \"\"\"\n parser = argparse.ArgumentParser(description='ternarybert evaluation')\n parser.add_argument('--device_target', type=str, default='Ascend',\n choices=['Ascend', 'GPU'], help=\n 'Device where the code will be implemented. (Default: GPU)')\n parser.add_argument('--device_id', type=int, default=0, help=\n 'Device id. (Default: 0)')\n parser.add_argument('--model_dir', type=str, default='', help=\n 'The checkpoint directory of model.')\n parser.add_argument('--data_dir', type=str, default='', help=\n 'Data directory.')\n parser.add_argument('--task_name', type=str, default='sts-b', choices=[\n 'sts-b', 'qnli', 'mnli'], help=\n 'The name of the task to train. (Default: sts-b)')\n parser.add_argument('--dataset_type', type=str, default='tfrecord',\n choices=['tfrecord', 'mindrecord'], help=\n 'The name of the task to train. (Default: tfrecord)')\n parser.add_argument('--batch_size', type=int, default=32, help=\n 'Batch size for evaluating')\n parser.add_argument('--data_name', type=str, default='eval.tf_record',\n help='')\n return parser.parse_args()\n\n\n<mask token>\n\n\ndef do_eval_standalone(args_opt):\n \"\"\"\n do eval standalone\n \"\"\"\n ckpt_file = os.path.join(args_opt.model_dir, args_opt.task_name)\n ckpt_file = get_ckpt(ckpt_file)\n print('ckpt file:', ckpt_file)\n task = task_cfg[args_opt.task_name]\n student_net_cfg.seq_length = task.seq_length\n eval_cfg.batch_size = args_opt.batch_size\n eval_data_dir = os.path.join(args_opt.data_dir, args_opt.task_name,\n args_opt.data_name)\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.\n device_target, device_id=args.device_id)\n eval_dataset = create_dataset(batch_size=eval_cfg.batch_size,\n device_num=1, rank=0, do_shuffle=False, data_dir=eval_data_dir,\n data_type=args_opt.dataset_type, seq_length=task.seq_length,\n task_type=task.task_type, drop_remainder=False)\n print('eval dataset size:', eval_dataset.get_dataset_size())\n print('eval dataset batch size:', eval_dataset.get_batch_size())\n eval_model = BertModelCLS(student_net_cfg, False, task.num_labels, 0.0,\n phase_type='student')\n param_dict = load_checkpoint(ckpt_file)\n new_param_dict = {}\n for key, value in param_dict.items():\n new_key = re.sub('tinybert_', 'bert_', key)\n new_key = re.sub('^bert.', '', new_key)\n new_param_dict[new_key] = value\n load_param_into_net(eval_model, new_param_dict)\n eval_model.set_train(False)\n columns_list = ['input_ids', 'input_mask', 'segment_ids', 'label_ids']\n callback = task.metrics()\n for step, data in enumerate(eval_dataset.create_dict_iterator()):\n input_data = []\n for i in columns_list:\n input_data.append(data[i])\n input_ids, input_mask, token_type_id, label_ids = input_data\n _, _, logits, _ = eval_model(input_ids, token_type_id, input_mask)\n callback.update(logits, label_ids)\n print('eval step: {}, {}: {}'.format(step, callback.name, callback.\n get_metrics()))\n metrics = callback.get_metrics()\n print('The best {}: {}'.format(callback.name, metrics))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n parse args\n \"\"\"\n parser = argparse.ArgumentParser(description='ternarybert evaluation')\n parser.add_argument('--device_target', type=str, default='Ascend',\n choices=['Ascend', 'GPU'], help=\n 'Device where the code will be implemented. 
(Default: GPU)')\n parser.add_argument('--device_id', type=int, default=0, help=\n 'Device id. (Default: 0)')\n parser.add_argument('--model_dir', type=str, default='', help=\n 'The checkpoint directory of model.')\n parser.add_argument('--data_dir', type=str, default='', help=\n 'Data directory.')\n parser.add_argument('--task_name', type=str, default='sts-b', choices=[\n 'sts-b', 'qnli', 'mnli'], help=\n 'The name of the task to train. (Default: sts-b)')\n parser.add_argument('--dataset_type', type=str, default='tfrecord',\n choices=['tfrecord', 'mindrecord'], help=\n 'The name of the task to train. (Default: tfrecord)')\n parser.add_argument('--batch_size', type=int, default=32, help=\n 'Batch size for evaluating')\n parser.add_argument('--data_name', type=str, default='eval.tf_record',\n help='')\n return parser.parse_args()\n\n\ndef get_ckpt(ckpt_file):\n lists = os.listdir(ckpt_file)\n lists.sort(key=lambda fn: os.path.getmtime(ckpt_file + '/' + fn))\n return os.path.join(ckpt_file, lists[-1])\n\n\ndef do_eval_standalone(args_opt):\n \"\"\"\n do eval standalone\n \"\"\"\n ckpt_file = os.path.join(args_opt.model_dir, args_opt.task_name)\n ckpt_file = get_ckpt(ckpt_file)\n print('ckpt file:', ckpt_file)\n task = task_cfg[args_opt.task_name]\n student_net_cfg.seq_length = task.seq_length\n eval_cfg.batch_size = args_opt.batch_size\n eval_data_dir = os.path.join(args_opt.data_dir, args_opt.task_name,\n args_opt.data_name)\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.\n device_target, device_id=args.device_id)\n eval_dataset = create_dataset(batch_size=eval_cfg.batch_size,\n device_num=1, rank=0, do_shuffle=False, data_dir=eval_data_dir,\n data_type=args_opt.dataset_type, seq_length=task.seq_length,\n task_type=task.task_type, drop_remainder=False)\n print('eval dataset size:', eval_dataset.get_dataset_size())\n print('eval dataset batch size:', eval_dataset.get_batch_size())\n eval_model = BertModelCLS(student_net_cfg, False, task.num_labels, 0.0,\n phase_type='student')\n param_dict = load_checkpoint(ckpt_file)\n new_param_dict = {}\n for key, value in param_dict.items():\n new_key = re.sub('tinybert_', 'bert_', key)\n new_key = re.sub('^bert.', '', new_key)\n new_param_dict[new_key] = value\n load_param_into_net(eval_model, new_param_dict)\n eval_model.set_train(False)\n columns_list = ['input_ids', 'input_mask', 'segment_ids', 'label_ids']\n callback = task.metrics()\n for step, data in enumerate(eval_dataset.create_dict_iterator()):\n input_data = []\n for i in columns_list:\n input_data.append(data[i])\n input_ids, input_mask, token_type_id, label_ids = input_data\n _, _, logits, _ = eval_model(input_ids, token_type_id, input_mask)\n callback.update(logits, label_ids)\n print('eval step: {}, {}: {}'.format(step, callback.name, callback.\n get_metrics()))\n metrics = callback.get_metrics()\n print('The best {}: {}'.format(callback.name, metrics))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n parse args\n \"\"\"\n parser = argparse.ArgumentParser(description='ternarybert evaluation')\n parser.add_argument('--device_target', type=str, default='Ascend',\n choices=['Ascend', 'GPU'], help=\n 'Device where the code will be implemented. (Default: GPU)')\n parser.add_argument('--device_id', type=int, default=0, help=\n 'Device id. 
(Default: 0)')\n parser.add_argument('--model_dir', type=str, default='', help=\n 'The checkpoint directory of model.')\n parser.add_argument('--data_dir', type=str, default='', help=\n 'Data directory.')\n parser.add_argument('--task_name', type=str, default='sts-b', choices=[\n 'sts-b', 'qnli', 'mnli'], help=\n 'The name of the task to train. (Default: sts-b)')\n parser.add_argument('--dataset_type', type=str, default='tfrecord',\n choices=['tfrecord', 'mindrecord'], help=\n 'The name of the task to train. (Default: tfrecord)')\n parser.add_argument('--batch_size', type=int, default=32, help=\n 'Batch size for evaluating')\n parser.add_argument('--data_name', type=str, default='eval.tf_record',\n help='')\n return parser.parse_args()\n\n\ndef get_ckpt(ckpt_file):\n lists = os.listdir(ckpt_file)\n lists.sort(key=lambda fn: os.path.getmtime(ckpt_file + '/' + fn))\n return os.path.join(ckpt_file, lists[-1])\n\n\ndef do_eval_standalone(args_opt):\n \"\"\"\n do eval standalone\n \"\"\"\n ckpt_file = os.path.join(args_opt.model_dir, args_opt.task_name)\n ckpt_file = get_ckpt(ckpt_file)\n print('ckpt file:', ckpt_file)\n task = task_cfg[args_opt.task_name]\n student_net_cfg.seq_length = task.seq_length\n eval_cfg.batch_size = args_opt.batch_size\n eval_data_dir = os.path.join(args_opt.data_dir, args_opt.task_name,\n args_opt.data_name)\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.\n device_target, device_id=args.device_id)\n eval_dataset = create_dataset(batch_size=eval_cfg.batch_size,\n device_num=1, rank=0, do_shuffle=False, data_dir=eval_data_dir,\n data_type=args_opt.dataset_type, seq_length=task.seq_length,\n task_type=task.task_type, drop_remainder=False)\n print('eval dataset size:', eval_dataset.get_dataset_size())\n print('eval dataset batch size:', eval_dataset.get_batch_size())\n eval_model = BertModelCLS(student_net_cfg, False, task.num_labels, 0.0,\n phase_type='student')\n param_dict = load_checkpoint(ckpt_file)\n new_param_dict = {}\n for key, value in param_dict.items():\n new_key = re.sub('tinybert_', 'bert_', key)\n new_key = re.sub('^bert.', '', new_key)\n new_param_dict[new_key] = value\n load_param_into_net(eval_model, new_param_dict)\n eval_model.set_train(False)\n columns_list = ['input_ids', 'input_mask', 'segment_ids', 'label_ids']\n callback = task.metrics()\n for step, data in enumerate(eval_dataset.create_dict_iterator()):\n input_data = []\n for i in columns_list:\n input_data.append(data[i])\n input_ids, input_mask, token_type_id, label_ids = input_data\n _, _, logits, _ = eval_model(input_ids, token_type_id, input_mask)\n callback.update(logits, label_ids)\n print('eval step: {}, {}: {}'.format(step, callback.name, callback.\n get_metrics()))\n metrics = callback.get_metrics()\n print('The best {}: {}'.format(callback.name, metrics))\n\n\nif __name__ == '__main__':\n args = parse_args()\n do_eval_standalone(args)\n", "step-4": "<mask token>\nimport os\nimport re\nimport argparse\nfrom mindspore import context\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom src.dataset import create_dataset\nfrom src.config import eval_cfg, student_net_cfg, task_cfg\nfrom src.tinybert_model import BertModelCLS\n\n\ndef parse_args():\n \"\"\"\n parse args\n \"\"\"\n parser = argparse.ArgumentParser(description='ternarybert evaluation')\n parser.add_argument('--device_target', type=str, default='Ascend',\n choices=['Ascend', 'GPU'], help=\n 'Device where the code will be implemented. 
(Default: GPU)')\n parser.add_argument('--device_id', type=int, default=0, help=\n 'Device id. (Default: 0)')\n parser.add_argument('--model_dir', type=str, default='', help=\n 'The checkpoint directory of model.')\n parser.add_argument('--data_dir', type=str, default='', help=\n 'Data directory.')\n parser.add_argument('--task_name', type=str, default='sts-b', choices=[\n 'sts-b', 'qnli', 'mnli'], help=\n 'The name of the task to train. (Default: sts-b)')\n parser.add_argument('--dataset_type', type=str, default='tfrecord',\n choices=['tfrecord', 'mindrecord'], help=\n 'The name of the task to train. (Default: tfrecord)')\n parser.add_argument('--batch_size', type=int, default=32, help=\n 'Batch size for evaluating')\n parser.add_argument('--data_name', type=str, default='eval.tf_record',\n help='')\n return parser.parse_args()\n\n\ndef get_ckpt(ckpt_file):\n lists = os.listdir(ckpt_file)\n lists.sort(key=lambda fn: os.path.getmtime(ckpt_file + '/' + fn))\n return os.path.join(ckpt_file, lists[-1])\n\n\ndef do_eval_standalone(args_opt):\n \"\"\"\n do eval standalone\n \"\"\"\n ckpt_file = os.path.join(args_opt.model_dir, args_opt.task_name)\n ckpt_file = get_ckpt(ckpt_file)\n print('ckpt file:', ckpt_file)\n task = task_cfg[args_opt.task_name]\n student_net_cfg.seq_length = task.seq_length\n eval_cfg.batch_size = args_opt.batch_size\n eval_data_dir = os.path.join(args_opt.data_dir, args_opt.task_name,\n args_opt.data_name)\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.\n device_target, device_id=args.device_id)\n eval_dataset = create_dataset(batch_size=eval_cfg.batch_size,\n device_num=1, rank=0, do_shuffle=False, data_dir=eval_data_dir,\n data_type=args_opt.dataset_type, seq_length=task.seq_length,\n task_type=task.task_type, drop_remainder=False)\n print('eval dataset size:', eval_dataset.get_dataset_size())\n print('eval dataset batch size:', eval_dataset.get_batch_size())\n eval_model = BertModelCLS(student_net_cfg, False, task.num_labels, 0.0,\n phase_type='student')\n param_dict = load_checkpoint(ckpt_file)\n new_param_dict = {}\n for key, value in param_dict.items():\n new_key = re.sub('tinybert_', 'bert_', key)\n new_key = re.sub('^bert.', '', new_key)\n new_param_dict[new_key] = value\n load_param_into_net(eval_model, new_param_dict)\n eval_model.set_train(False)\n columns_list = ['input_ids', 'input_mask', 'segment_ids', 'label_ids']\n callback = task.metrics()\n for step, data in enumerate(eval_dataset.create_dict_iterator()):\n input_data = []\n for i in columns_list:\n input_data.append(data[i])\n input_ids, input_mask, token_type_id, label_ids = input_data\n _, _, logits, _ = eval_model(input_ids, token_type_id, input_mask)\n callback.update(logits, label_ids)\n print('eval step: {}, {}: {}'.format(step, callback.name, callback.\n get_metrics()))\n metrics = callback.get_metrics()\n print('The best {}: {}'.format(callback.name, metrics))\n\n\nif __name__ == '__main__':\n args = parse_args()\n do_eval_standalone(args)\n", "step-5": "# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"eval standalone script\"\"\"\n\nimport os\nimport re\nimport argparse\n\nfrom mindspore import context\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom src.dataset import create_dataset\nfrom src.config import eval_cfg, student_net_cfg, task_cfg\nfrom src.tinybert_model import BertModelCLS\n\n\ndef parse_args():\n \"\"\"\n parse args\n \"\"\"\n parser = argparse.ArgumentParser(description='ternarybert evaluation')\n parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'],\n help='Device where the code will be implemented. (Default: GPU)')\n parser.add_argument('--device_id', type=int, default=0, help='Device id. (Default: 0)')\n parser.add_argument('--model_dir', type=str, default='', help='The checkpoint directory of model.')\n parser.add_argument('--data_dir', type=str, default='', help='Data directory.')\n parser.add_argument('--task_name', type=str, default='sts-b', choices=['sts-b', 'qnli', 'mnli'],\n help='The name of the task to train. (Default: sts-b)')\n parser.add_argument('--dataset_type', type=str, default='tfrecord', choices=['tfrecord', 'mindrecord'],\n help='The name of the task to train. (Default: tfrecord)')\n parser.add_argument('--batch_size', type=int, default=32, help='Batch size for evaluating')\n parser.add_argument('--data_name', type=str, default='eval.tf_record', help='')\n return parser.parse_args()\n\n\ndef get_ckpt(ckpt_file):\n lists = os.listdir(ckpt_file)\n lists.sort(key=lambda fn: os.path.getmtime(ckpt_file + '/' + fn))\n return os.path.join(ckpt_file, lists[-1])\n\n\ndef do_eval_standalone(args_opt):\n \"\"\"\n do eval standalone\n \"\"\"\n ckpt_file = os.path.join(args_opt.model_dir, args_opt.task_name)\n ckpt_file = get_ckpt(ckpt_file)\n print('ckpt file:', ckpt_file)\n task = task_cfg[args_opt.task_name]\n student_net_cfg.seq_length = task.seq_length\n eval_cfg.batch_size = args_opt.batch_size\n eval_data_dir = os.path.join(args_opt.data_dir, args_opt.task_name, args_opt.data_name)\n\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args.device_id)\n\n eval_dataset = create_dataset(batch_size=eval_cfg.batch_size,\n device_num=1,\n rank=0,\n do_shuffle=False,\n data_dir=eval_data_dir,\n data_type=args_opt.dataset_type,\n seq_length=task.seq_length,\n task_type=task.task_type,\n drop_remainder=False)\n print('eval dataset size:', eval_dataset.get_dataset_size())\n print('eval dataset batch size:', eval_dataset.get_batch_size())\n\n eval_model = BertModelCLS(student_net_cfg, False, task.num_labels, 0.0, phase_type='student')\n param_dict = load_checkpoint(ckpt_file)\n new_param_dict = {}\n for key, value in param_dict.items():\n new_key = re.sub('tinybert_', 'bert_', key)\n new_key = re.sub('^bert.', '', new_key)\n new_param_dict[new_key] = value\n load_param_into_net(eval_model, new_param_dict)\n eval_model.set_train(False)\n\n columns_list = [\"input_ids\", \"input_mask\", \"segment_ids\", \"label_ids\"]\n callback = task.metrics()\n for step, data in enumerate(eval_dataset.create_dict_iterator()):\n input_data = []\n for i in columns_list:\n input_data.append(data[i])\n input_ids, input_mask, token_type_id, label_ids = input_data\n _, _, logits, _ = eval_model(input_ids, token_type_id, input_mask)\n callback.update(logits, label_ids)\n print('eval step: {}, {}: 
{}'.format(step, callback.name, callback.get_metrics()))\n metrics = callback.get_metrics()\n print('The best {}: {}'.format(callback.name, metrics))\n\n\nif __name__ == '__main__':\n args = parse_args()\n do_eval_standalone(args)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
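The evaluation script in the record above leans on two small utilities: picking the newest checkpoint in a directory by modification time, and renaming checkpoint keys before loading them into the student network. Below is a minimal standalone sketch of both using only the standard library; the sample key names in the demo call are invented purely to show the renaming effect, and this is not the project's actual code.

import os
import re

def latest_checkpoint(ckpt_dir):
    # Sort directory entries by modification time and return the newest one,
    # mirroring what get_ckpt does above.
    files = sorted(os.listdir(ckpt_dir),
                   key=lambda fn: os.path.getmtime(os.path.join(ckpt_dir, fn)))
    return os.path.join(ckpt_dir, files[-1])

def remap_keys(param_dict):
    # Strip the 'tinybert_' and leading 'bert.' prefixes the same way the script does.
    new_params = {}
    for key, value in param_dict.items():
        new_key = re.sub('tinybert_', 'bert_', key)
        new_key = re.sub('^bert.', '', new_key)
        new_params[new_key] = value
    return new_params

# Hypothetical key names, only to illustrate the mapping.
print(remap_keys({'tinybert_embedding.weight': 0, 'bert.cls.dense.weight': 1}))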
import csv

from bs4 import BeautifulSoup

# Open the saved HTML file and parse it with the lxml parser.
with open('/Users/neeraj.joshi/Downloads/index.html') as html_file:
    soup = BeautifulSoup(html_file, 'lxml')

# Create the output CSV file in write mode.
filename = '/Users/neeraj.joshi/Downloads/test.csv'
with open(filename, 'w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file)
    # Each <tr> is one table row; each <td> inside it is one cell of that row.
    for row in soup.find_all('tr'):
        data = []
        for cell in row.find_all('td'):
            # Collect the text of every cell in the current row.
            data.append(cell.text)
        print(data)
        csv_writer.writerow(data)
normal
{ "blob_id": "47be41bd5838b828acdc90c3ef5abdeec9da1e85", "index": 1579, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\n<mask token>\nfor tree in soup.find_all('tr'):\n data = []\n for todd in tree.find_all('td'):\n data.append(todd.text)\n print(data)\n csv_writer.writerow(data)\n", "step-3": "<mask token>\nwith open('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\nfilename = '/Users/neeraj.joshi/Downloads/test.csv'\ncsv_writer = csv.writer(open(filename, 'w'))\nfor tree in soup.find_all('tr'):\n data = []\n for todd in tree.find_all('td'):\n data.append(todd.text)\n print(data)\n csv_writer.writerow(data)\n", "step-4": "import csv\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nwith open('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\nfilename = '/Users/neeraj.joshi/Downloads/test.csv'\ncsv_writer = csv.writer(open(filename, 'w'))\nfor tree in soup.find_all('tr'):\n data = []\n for todd in tree.find_all('td'):\n data.append(todd.text)\n print(data)\n csv_writer.writerow(data)\n", "step-5": "import csv\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n# open html file and parsing lxml \nwith open ('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\n #row = soup.find_all('tr')\n #column = row.find_all('td')\n #print(soup)\n# create a file by any name and in order to write it in write mode type w\nfilename = '/Users/neeraj.joshi/Downloads/test.csv'\ncsv_writer = csv.writer(open(filename, 'w'))\n# storing data in data variable\n\n#assume tr as a columns\nfor tree in soup.find_all('tr'):\n data = []\n #assume td as rows \n for todd in tree.find_all('td'): \n #print(todd.text) \"appending data of td into array data made up there \"\n \n data.append(todd.text) \n print(data)\n csv_writer.writerow(data) \n \n \n\n\n\n\n ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
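When the page really contains a <table> element, the same HTML-to-CSV conversion can be done without walking rows by hand: pandas.read_html parses every table in the document into DataFrames. A short sketch, assuming the same local file path as above and that lxml (already required by the scraper) is installed.

import pandas as pd

# read_html returns a list of DataFrames, one per <table> in the document.
tables = pd.read_html('/Users/neeraj.joshi/Downloads/index.html')
tables[0].to_csv('/Users/neeraj.joshi/Downloads/test.csv', index=False)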
import pandas as pd import sweetviz as sv b = pd.read_csv("final_cricket_players.csv", low_memory=False) b = b.replace(to_replace="-",value="") b = b.replace(to_replace="[]",value="") b = b.replace(to_replace="{}",value="") b.drop(b.columns[b.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True) b.to_csv('Cleaned_dataset.csv', index=False) report = sv.analyze(b, pairwise_analysis='off') report.show_html()
normal
{ "blob_id": "f93b7f2939bbee9b0cb5402d3e5f5d6c482d37c4", "index": 6983, "step-1": "<mask token>\n", "step-2": "<mask token>\nb.drop(b.columns[b.columns.str.contains('unnamed', case=False)], axis=1,\n inplace=True)\nb.to_csv('Cleaned_dataset.csv', index=False)\n<mask token>\nreport.show_html()\n", "step-3": "<mask token>\nb = pd.read_csv('final_cricket_players.csv', low_memory=False)\nb = b.replace(to_replace='-', value='')\nb = b.replace(to_replace='[]', value='')\nb = b.replace(to_replace='{}', value='')\nb.drop(b.columns[b.columns.str.contains('unnamed', case=False)], axis=1,\n inplace=True)\nb.to_csv('Cleaned_dataset.csv', index=False)\nreport = sv.analyze(b, pairwise_analysis='off')\nreport.show_html()\n", "step-4": "import pandas as pd\nimport sweetviz as sv\nb = pd.read_csv('final_cricket_players.csv', low_memory=False)\nb = b.replace(to_replace='-', value='')\nb = b.replace(to_replace='[]', value='')\nb = b.replace(to_replace='{}', value='')\nb.drop(b.columns[b.columns.str.contains('unnamed', case=False)], axis=1,\n inplace=True)\nb.to_csv('Cleaned_dataset.csv', index=False)\nreport = sv.analyze(b, pairwise_analysis='off')\nreport.show_html()\n", "step-5": "import pandas as pd\r\nimport sweetviz as sv\r\nb = pd.read_csv(\"final_cricket_players.csv\", low_memory=False)\r\nb = b.replace(to_replace=\"-\",value=\"\")\r\nb = b.replace(to_replace=\"[]\",value=\"\")\r\nb = b.replace(to_replace=\"{}\",value=\"\")\r\n\r\nb.drop(b.columns[b.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)\r\nb.to_csv('Cleaned_dataset.csv', index=False)\r\nreport = sv.analyze(b, pairwise_analysis='off')\r\nreport.show_html()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
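The three chained replace calls and the unnamed-column drop in the record above can be written more compactly: pandas accepts a list of values to replace in one call, and boolean column selection avoids the index lookup passed to drop. A sketch of the same cleaning step; mapping the placeholders to NaN rather than empty strings is a deliberate change here, since profiling tools such as sweetviz treat NaN as missing data.

import numpy as np
import pandas as pd

df = pd.read_csv("final_cricket_players.csv", low_memory=False)
# Turn the placeholder tokens into real missing values in a single pass.
df = df.replace(["-", "[]", "{}"], np.nan)
# Keep every column whose name does not contain 'unnamed' (case-insensitive).
df = df.loc[:, ~df.columns.str.contains("unnamed", case=False)]
df.to_csv("Cleaned_dataset.csv", index=False)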
import logging from datetime import datetime import boto3 from pytz import timezone from mliyweb.api.v1.api_session_limiter import session_is_okay from mliyweb.api.v1.json_view import JsonView from mliyweb.dns import deleteDnsEntry from mliyweb.models import Cluster from mliyweb.resources.clusters import ClusterService from mliyweb.settings import AWS_REGION from mliyweb.utils import log_enter_exit class UserGroupClusters(JsonView): ''' Returns a json struct with the current clusters. If the last updated time in the db is greater than the timeout, it returns the current data and launches a background thread to refresh and prune the cluster list. If called with ?forcerefresh as a url argument it'll refresh regardless of the last updated time. ''' logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() # global instance refresh time stamp @log_enter_exit(logger) def get_data(self, context): user = self.request.user try: if session_is_okay(self.request.session, "group_clusters"): self.logger.info("Updating clusters in database") return self.cluster_service.update_by_user_group(user) else: self.logger.info("Getting clusters from database") return self.cluster_service.get_by_user_group(user) except Exception as e: self.logger.exception(e) return [] class UserClusters(JsonView): # TODO There needs to be a Cluster Launch thread cleanup/rework logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): username = self.request.user.username try: if session_is_okay(self.request.session, "user_clusters"): self.logger.info("Updating clusters in database") return self.cluster_service.update_by_user(username) else: self.logger.info("Getting clusters from database") return self.cluster_service.get_by_user(username) except Exception as e: self.logger.exception(e) raise class SingleCluster(JsonView): logger = logging.getLogger('mliyweb.views.SingleCluster') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): cluster_id = self.kwargs['pk'] try: if session_is_okay(self.request.session, "user_clusters"): self.logger.info("Updating clusters in database") return self.cluster_service.update_single_cluster(cluster_id) else: self.logger.info("Getting clusters from database") return self.cluster_service.get_single_cluster(cluster_id) except Exception as e: self.logger.exception(e) raise class ChangeClusterState(JsonView): log = logging.getLogger('mliyweb.views.ChangeClusterState') cluster_service = ClusterService() @log_enter_exit(log, log_level=10) def get_data(self,context): client = boto3.client('cloudformation', region_name=AWS_REGION) cluster = Cluster.objects.get(cluster_id = self.kwargs['clusterid']) client.delete_stack(StackName=cluster.stack_id) if cluster.current_bill: cluster.current_bill.ongoing = False cluster.current_bill.end_time = datetime.now(timezone('UTC')) cluster.current_bill.save() if cluster.state == 'TERMINATED' or cluster.state == 'FAILED': deleteDnsEntry(cluster.cluster_id,cluster.master_ip) else: deleteDnsEntry(cluster.cluster_id,cluster.master_ip) cluster.state = "TERMINATED" cluster.save() return { 'action' : 'terminate', 'status' : 'ok'}
normal
{ "blob_id": "f882b73645c6a280a17f40b27c01ecad7e4d85ae", "index": 5860, "step-1": "<mask token>\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-2": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n <mask token>\n <mask token>\n <mask token>\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n 
self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-3": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n <mask token>\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n 
deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-4": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n \"\"\"\n\tReturns a json struct with the current clusters. If the last updated\n\ttime in the db is greater than the timeout, it returns the current data\n\tand launches a background thread to refresh and prune the cluster list.\n\n\tIf called with ?forcerefresh as a url argument it'll refresh regardless\n\tof the last updated time.\n\t\"\"\"\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-5": "import logging\nfrom datetime import datetime\n\nimport boto3\nfrom pytz import timezone\n\nfrom mliyweb.api.v1.api_session_limiter import session_is_okay\nfrom mliyweb.api.v1.json_view import JsonView\nfrom mliyweb.dns import deleteDnsEntry\nfrom mliyweb.models import Cluster\nfrom mliyweb.resources.clusters import ClusterService\nfrom mliyweb.settings import 
AWS_REGION\nfrom mliyweb.utils import log_enter_exit\n\n\nclass UserGroupClusters(JsonView):\n\t'''\n\tReturns a json struct with the current clusters. If the last updated\n\ttime in the db is greater than the timeout, it returns the current data\n\tand launches a background thread to refresh and prune the cluster list.\n\n\tIf called with ?forcerefresh as a url argument it'll refresh regardless\n\tof the last updated time.\n\t'''\n\tlogger = logging.getLogger('mliyweb.views.UserClusters')\n\tcluster_service = ClusterService()\n\n\t# global instance refresh time stamp\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\n\t\tuser = self.request.user\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"group_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_by_user_group(user)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_by_user_group(user)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\n\t\treturn []\n\n\nclass UserClusters(JsonView):\n\t# TODO There needs to be a Cluster Launch thread cleanup/rework\n\tlogger = logging.getLogger('mliyweb.views.UserClusters')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\t\tusername = self.request.user.username\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"user_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_by_user(username)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_by_user(username)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\t\t\traise\n\nclass SingleCluster(JsonView):\n\tlogger = logging.getLogger('mliyweb.views.SingleCluster')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\t\tcluster_id = self.kwargs['pk']\n\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"user_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_single_cluster(cluster_id)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_single_cluster(cluster_id)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\t\t\traise\n\n\nclass ChangeClusterState(JsonView):\n\tlog = logging.getLogger('mliyweb.views.ChangeClusterState')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(log, log_level=10)\n\tdef get_data(self,context):\n\n\t\tclient = boto3.client('cloudformation', region_name=AWS_REGION)\n\t\tcluster = Cluster.objects.get(cluster_id = self.kwargs['clusterid'])\n\n\t\tclient.delete_stack(StackName=cluster.stack_id)\n\t\tif cluster.current_bill:\n\t\t\tcluster.current_bill.ongoing = False\n\t\t\tcluster.current_bill.end_time = datetime.now(timezone('UTC'))\n\t\t\tcluster.current_bill.save()\n\n\t\tif cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n\t\t\tdeleteDnsEntry(cluster.cluster_id,cluster.master_ip)\n\t\telse:\n\t\t\tdeleteDnsEntry(cluster.cluster_id,cluster.master_ip)\n\n\t\tcluster.state = \"TERMINATED\"\n\t\tcluster.save()\n\n\t\treturn { 'action' : 'terminate', 'status' : 'ok'}", "step-ids": [ 9, 11, 12, 13, 15 ] }
[ 9, 11, 12, 13, 15 ]
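The cluster views in the record above gate the expensive refresh behind session_is_okay, so the database is only updated when the cached data is considered stale. That helper is not shown in the record; the snippet below is only a stand-in that rate-limits refreshes per key with a timestamp, to illustrate the throttling idea rather than the project's real implementation.

import time

_LAST_REFRESH = {}

def refresh_is_due(key, interval_seconds=60):
    # Return True at most once per interval for a given key.
    now = time.time()
    if now - _LAST_REFRESH.get(key, 0) >= interval_seconds:
        _LAST_REFRESH[key] = now
        return True
    return False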
# This script copies all files with a given extension into a new folder, flattening the sub folder structure.
# Created by Maurice de Kleijn, Vrije Universiteit Amsterdam Spatial Information Laboratory, for the data management of the archaeological project Barcin Hoyuk
# 22062016 Python 2.7

import shutil
import os

org_GIS = raw_input("provide path to GIS folder in dropbox : eg. C:\Dropbox\Barcin_Hoyuk\AIS_Barcin_Hoyuk\AIS\GIS\\: ")
outputfolder = raw_input("provide path to output folder : eg. C:\Temp\: ")
ext = raw_input("provide extension type to be copied eg .tif or .jpg :")

# Let the Windows dir command list every matching file (recursively, bare format) into a temporary text file.
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')

file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()

# Copy each listed file into the single output folder.
for line in lines:
    ln = line.rstrip('\n')
    shutil.copy(ln, outputfolder)
file1.close()

# Remove the temporary listing again.
os.system('del ' + org_GIS + 'tempext.txt')

raw_input("done!")
normal
{ "blob_id": "778cf8064fa45e3e25a66f2165dcf6885c72fb8a", "index": 634, "step-1": "<mask token>\n", "step-2": "<mask token>\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\n<mask token>\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n", "step-3": "<mask token>\norg_GIS = raw_input(\n 'provide path to GIS folder in dropbox : eg. C:\\\\Dropbox\\\\Barcin_Hoyuk\\\\AIS_Barcin_Hoyuk\\\\AIS\\\\GIS\\\\: '\n )\noutputfolder = raw_input('provide path to output folder : eg. C:\\\\Temp\\\\: ')\next = raw_input('provide extention type to be copied eg .tif or .jpg :')\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n", "step-4": "import shutil\nimport os\norg_GIS = raw_input(\n 'provide path to GIS folder in dropbox : eg. C:\\\\Dropbox\\\\Barcin_Hoyuk\\\\AIS_Barcin_Hoyuk\\\\AIS\\\\GIS\\\\: '\n )\noutputfolder = raw_input('provide path to output folder : eg. C:\\\\Temp\\\\: ')\next = raw_input('provide extention type to be copied eg .tif or .jpg :')\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n", "step-5": "# This script allows you to copy all files with a certain extention to a new folder without integrating the sub folders\n# Created by Maurice de Kleijn Vrije Universiteit Amsterdam Spatial Information laboratory for the datamanagement of the the archaological project Barin Hoyuk\n# 22062016 Python 2.7\n\nimport shutil\nimport os\n\norg_GIS = raw_input(\"provide path to GIS folder in dropbox : eg. C:\\Dropbox\\Barcin_Hoyuk\\AIS_Barcin_Hoyuk\\AIS\\GIS\\\\: \")\noutputfolder = raw_input(\"provide path to output folder : eg. C:\\Temp\\: \")\next = raw_input(\"provide extention type to be copied eg .tif or .jpg :\")\n\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\n\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\n\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\n\nos.system('del ' + org_GIS + 'tempext.txt')\n\nraw_input(\"done!\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
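The script in the record above shells out to the Windows dir command and a temporary file to enumerate matching files. On Python 3 the same flattening copy can be written with pathlib alone, which also removes the tempext.txt round trip and the Windows dependency. A sketch, with the prompts kept interactive:

import shutil
from pathlib import Path

src_root = Path(input("provide path to GIS folder: "))
dest = Path(input("provide path to output folder: "))
ext = input("provide extension to copy, e.g. .tif: ")

# rglob walks every sub folder; copying everything into one folder flattens the hierarchy.
for path in src_root.rglob('*' + ext):
    shutil.copy(path, dest)
print("done!")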
# Differences between Python 2 and Python 3

# print "hello world"              # Python 2 print statement
print("hello world")               # becomes a function call in Python 3

# raw_input('What is your name?')  # Python 2
input('What is your name?')        # becomes input() in Python 3

# The language of Python
# Reserved words:
# and, as, assert, break, class, continue, def, del, elif, else,
# except, finally, for, from, global, if, import, in, is, lambda,
# nonlocal, not, or, pass, raise, return, try, while, with, yield

# Section 1.2
normal
{ "blob_id": "40471bfcf05ef45fbb070bbb5bfd4c425fe59b1c", "index": 7523, "step-1": "# Differences between Python 2 and Python 3\n\nprint \"hello world\" \n # become \nprint(\"hello world\") # in Pyton 3\n\nraw_input('What is your name?') \n# become\ninput('What is your name?') # in Python 3\n\n\n# the language of Python \n# Reserved words\nand\nas\nassert\nbreak\nclass\ncontinue\ndef\ndel\nelif\nelse\nexcept\nfinally\nfor\nfrom\nglobal\nif\nimport\nin\nis\nlambda\nnonlocal\nnot\nor \npass\nraise\nreturn\ntry\nwhile\nwith\nyield\n\n# Section 1.2 ", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
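The reserved-word list in the note above can also be pulled from the interpreter itself instead of being typed out; the standard library keyword module exposes it, so the list always matches the running Python version. A two-line check:

import keyword

print(len(keyword.kwlist), "reserved words")
print(keyword.kwlist)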
from fastapi import FastAPI
from app.router.routes import initRoutes
from app.cors.cors import initCors

app = FastAPI(debug=True, title="Recipe API")
initCors(app)
initRoutes(app)
normal
{ "blob_id": "1857d76b8c68c58d2d721de529811a6aeb09fcbb", "index": 5407, "step-1": "<mask token>\n", "step-2": "<mask token>\ninitCors(app)\ninitRoutes(app)\n", "step-3": "<mask token>\napp = FastAPI(debug=True, title='Recipe API')\ninitCors(app)\ninitRoutes(app)\n", "step-4": "from fastapi import FastAPI\nfrom app.router.routes import initRoutes\nfrom app.cors.cors import initCors\napp = FastAPI(debug=True, title='Recipe API')\ninitCors(app)\ninitRoutes(app)\n", "step-5": "from fastapi import FastAPI\nfrom app.router.routes import initRoutes\nfrom app.cors.cors import initCors\n\napp = FastAPI(debug=True,title=\"Recipe API\")\ninitCors(app)\ninitRoutes(app)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
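initRoutes and initCors come from the application's own modules and are not shown in this record. A plausible minimal shape for them is sketched below using FastAPI's stock APIRouter and CORSMiddleware; the /recipes path and the wide-open CORS policy are assumptions made only for the sketch, not the project's real configuration.

from fastapi import APIRouter, FastAPI
from fastapi.middleware.cors import CORSMiddleware

router = APIRouter()

@router.get("/recipes")
def list_recipes():
    return []

def initCors(app: FastAPI) -> None:
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_methods=["*"],
        allow_headers=["*"],
    )

def initRoutes(app: FastAPI) -> None:
    app.include_router(router)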
# Read a tuple like "(2,3,4,6,7)" and report the size of the largest group of
# numbers in which each member shares a common factor (> 1) with the group.
string = input()
string = string.replace("(", "")
string = string.replace(")", "")
string = list(map(int, string.split(",")))
if 1 in string:
    string.remove(1)          # 1 shares no factor with anything
mid = [string[0]]             # the group currently being grown
string.remove(string[0])
result = 0
tar = 0
while string != []:
    tar = 0
    length = len(string)
    i = 0
    while i < len(string):
        cout = 0
        count = 0
        for j in mid:
            # Trial division: does string[i] share a factor with group member j?
            for k in range(2, min(string[i], j) + 1):
                if (string[i] % k == 0) & (j % k == 0):
                    mid.append(string[i])
                    string.remove(string[i])
                    count = 1
                    break
            if count == 0:
                cout += 1
            else:
                break
        if count == 0:
            i += 1
        if cout == len(mid):
            tar += 1
        if (tar == length) | (string == []):
            # No remaining number fits the current group: record it and start a new one.
            if len(mid) > result:
                result = len(mid)
            if string != []:
                mid = [string[0]]
                string.remove(string[0])
if len(mid) > result:
    result = len(mid)         # make sure the final group is counted
print(result)
normal
{ "blob_id": "6a8cab1fceffa0d70441cc600137417a8b81d7b1", "index": 6897, "step-1": "<mask token>\n", "step-2": "<mask token>\nif 1 in string:\n string.remove(1)\n<mask token>\nstring.remove(string[0])\n<mask token>\nwhile string != []:\n tar = 0\n length = len(string)\n i = 0\n while i < len(string):\n cout = 0\n count = 0\n for j in mid:\n for k in range(2, min(string[i], j) + 1):\n if (string[i] % k == 0) & (j % k == 0):\n mid.append(string[i])\n string.remove(string[i])\n count = 1\n break\n if count == 0:\n cout += 1\n else:\n break\n if count == 0:\n i += 1\n if cout == len(mid):\n tar += 1\n if (tar == length) | (string == []):\n if len(mid) > result:\n result = len(mid)\n if string != []:\n mid = [string[0]]\n string.remove(string[0])\nif len(mid) > result:\n reuslt = len(mid)\nprint(result)\n", "step-3": "string = input()\nstring = string.replace('(', '')\nstring = string.replace(')', '')\nstring = list(map(int, string.split(',')))\nif 1 in string:\n string.remove(1)\nmid = [string[0]]\nstring.remove(string[0])\nresult = 0\ntar = 0\nwhile string != []:\n tar = 0\n length = len(string)\n i = 0\n while i < len(string):\n cout = 0\n count = 0\n for j in mid:\n for k in range(2, min(string[i], j) + 1):\n if (string[i] % k == 0) & (j % k == 0):\n mid.append(string[i])\n string.remove(string[i])\n count = 1\n break\n if count == 0:\n cout += 1\n else:\n break\n if count == 0:\n i += 1\n if cout == len(mid):\n tar += 1\n if (tar == length) | (string == []):\n if len(mid) > result:\n result = len(mid)\n if string != []:\n mid = [string[0]]\n string.remove(string[0])\nif len(mid) > result:\n reuslt = len(mid)\nprint(result)\n", "step-4": "string=input();\nstring=string.replace(\"(\",\"\");\nstring=string.replace(\")\",\"\");\nstring=list(map(int,string.split(\",\")));\nif(1 in string):\n string.remove(1);\nmid=[string[0]];\nstring.remove(string[0]);\nresult=0;\ntar=0;\nwhile(string!=[]):\n tar=0;\n length=len(string);\n i=0\n while(i<len(string)):\n cout=0;\n count=0\n for j in mid:\n for k in range(2,min(string[i],j)+1):\n if(string[i]%k==0)&(j%k==0):\n mid.append(string[i]);\n string.remove(string[i]);\n count=1;\n break;\n if(count==0):\n cout+=1;\n else:\n break;\n if(count==0):\n i+=1;\n if(cout==len(mid)):\n tar+=1;\n if (tar == length)|(string==[]):\n if (len(mid) > result):\n result = len(mid);\n if(string!=[]):\n mid = [string[0]];\n string.remove((string[0]));\nif(len(mid)>result):\n reuslt=len(mid);\nprint(result)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
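The inner trial-division loop in the snippet above only exists to answer one question: do two numbers share a factor greater than 1? math.gcd answers that directly and far more cheaply. Below is a small sketch of the predicate and of greedily growing one group with it; the example input is made up, and the loop is a simplified reading of the task rather than a line-for-line port of the snippet.

from math import gcd

def shares_factor(a, b):
    # True when a and b have a common divisor greater than 1.
    return gcd(a, b) > 1

def grow_group(numbers):
    # Greedily pull numbers into the group while any member shares a factor with them.
    remaining = [n for n in numbers if n != 1]
    group = [remaining.pop(0)]
    changed = True
    while changed:
        changed = False
        for n in remaining[:]:
            if any(shares_factor(n, m) for m in group):
                group.append(n)
                remaining.remove(n)
                changed = True
    return group

print(grow_group([4, 6, 9, 25, 35, 7]))   # -> [4, 6, 9] for this input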
# -*- coding: utf-8 -*- """ This file is part of pyCMBS. (c) 2012- Alexander Loew For COPYING and LICENSE details, please refer to the LICENSE file """ import unittest from pycmbs import data4D class TestPycmbsData4D(unittest.TestCase): def setUp(self): pass def test_DummyTest(self): pass if __name__ == "__main__": unittest.main()
normal
{ "blob_id": "87562ce2a957de3fa2eb84cbb0de18c6ce264c6b", "index": 7676, "step-1": "<mask token>\n\n\nclass TestPycmbsData4D(unittest.TestCase):\n\n def setUp(self):\n pass\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TestPycmbsData4D(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_DummyTest(self):\n pass\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TestPycmbsData4D(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_DummyTest(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "<mask token>\nimport unittest\nfrom pycmbs import data4D\n\n\nclass TestPycmbsData4D(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_DummyTest(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nThis file is part of pyCMBS.\n(c) 2012- Alexander Loew\nFor COPYING and LICENSE details, please refer to the LICENSE file\n\"\"\"\n\nimport unittest\nfrom pycmbs import data4D\n\nclass TestPycmbsData4D(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_DummyTest(self):\n pass\n\nif __name__ == \"__main__\":\n unittest.main()\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
#!/usr/bin/env python from collections import defaultdict from cluster.common import Cluster from cluster.tools import print_table def check_status(args): """ Print node details :param args: Arguments from argparse :type args: argparse.Namespace """ cluster = Cluster(jobs_qstat=True, nodes=True, link=True) nodes = [] if args.filter_states: cluster.filter_node_states(set(args.filter_states.lower().split(','))) for node in cluster.nodes: nodes.append([ node.name, node.states, node.load, "%3d/%3d (%3d%%)" % ( node.cpu_res, node.cpu_all, 1. * node.cpu_res / node.cpu_all * 100.) if node.cpu_all else 'N/A', # Cores "%5.1f/%5.1fG (%3d%%)" % ( node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.) if node.mem_all else 'N/A', # Memory ''.join(('*' * node.cpu_res) + ('-' * (node.cpu_all - node.cpu_res))) ]) if args.show_job_owners: nodes[-1][-1] = '' empty = [''] * 5 users = defaultdict(list) for job in node.jobs_qstat: users[job.user].append(job) for orphan in node.orphans: users['ORPHANS'].append(orphan) for idx, uitem in enumerate(users.items()): u, jobs = uitem column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in jobs])) if idx: nodes.append(empty + [column_data]) else: nodes[-1][-1] = column_data # Printing bits print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory', 'Jobs'], nodes) def main(): """ Execute main program """ # noinspection PyCompatibility import argparse parser = argparse.ArgumentParser(description='Check nodes status.') parser.add_argument('-o', '--show-job-owners', action='store_true', help='List jobs running on nodes') parser.add_argument('-s', '--filter-states', help='Display only nodes in FILTER_STATES (comma separated).') args = parser.parse_args() check_status(args) if __name__ == '__main__': main()
normal
{ "blob_id": "381b59ab9fa85561932a9bfb9ab8cef635901a35", "index": 7249, "step-1": "<mask token>\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n 
\"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from collections import defaultdict\nfrom cluster.common import Cluster\nfrom cluster.tools import print_table\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python\nfrom collections import defaultdict\n\nfrom cluster.common import Cluster\nfrom cluster.tools import print_table\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n\n for node in cluster.nodes:\n nodes.append([\n node.name,\n node.states,\n node.load,\n \"%3d/%3d (%3d%%)\" % (\n node.cpu_res, node.cpu_all, 1. * node.cpu_res / node.cpu_all * 100.) if node.cpu_all else 'N/A', # Cores\n \"%5.1f/%5.1fG (%3d%%)\" % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.) 
if node.mem_all else 'N/A', # Memory\n ''.join(('*' * node.cpu_res) + ('-' * (node.cpu_all - node.cpu_res)))\n ])\n\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in jobs]))\n\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n\n # Printing bits\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory', 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n # noinspection PyCompatibility\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true', help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help='Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
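The per-node rows in the status script above combine a utilisation bar built from '*' and '-' characters with jobs grouped by owner via defaultdict. The same two ideas in isolation, with made-up users and job ids:

from collections import defaultdict

def usage_bar(used, total):
    return '*' * used + '-' * (total - used)

jobs = [("alice", 101), ("bob", 102), ("alice", 103)]
by_user = defaultdict(list)
for user, job_id in jobs:
    by_user[user].append(job_id)

print(usage_bar(3, 8))                      # ***-----
for user, ids in by_user.items():
    print("%s: %s" % (user, ' '.join(str(i) for i in ids)))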
import json import sys with open(sys.argv[1], 'r') as f: x = json.load(f) with open('my_wire_to_quartus_wire.json', 'r') as f: wirenamemap = json.load(f) print("----- There are {} muxes in the database".format(len(x))) print("----- There are {} routing pairs in the database".format(sum((len(v) for k, v in x.items())))) def bits2str(bits): ret = "" for row in bits: rowstr = "" for bit in row: rowstr += "1" if bit else "0" ret += rowstr + '\n' return ret def parse_xyi(inp): xpos = inp.find('X') ypos = inp.find('Y') ipos = inp.find('I') assert xpos >= 0 assert ypos > xpos assert ipos > ypos return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:])) def parse_xysi(inp): xpos = inp.find('X') ypos = inp.find('Y') spos = inp.find('S') ipos = inp.find('I') assert xpos >= 0 assert ypos > xpos assert spos > ypos assert ipos > spos sval = int(inp[spos + 1:ipos]) assert sval == 0 return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:])) def anybits(bits): for y in bits: for x in y: if not x: return True return False def decodemux(bits): A = not bits[0][0] B = not bits[0][1] C = not bits[0][2] D = not bits[0][3] E = not bits[1][0] F = not bits[1][1] G = not bits[1][2] H = not bits[1][3] assert G + C + D + H == 1 assert A + B + E + F == 1 or (A + B + E + F == 0 and G) if G: assert A + B + C + D + E + F + H == 0 if G: return 0 if C: if A: return 1 if B: return 2 if E: return 3 if F: return 4 if D: if A: return 5 if B: return 6 if E: return 7 if F: return 8 if H: if A: return 9 if B: return 10 if E: return 11 if F: return 12 def flipv(muxbits): return muxbits[::-1] def fliph(muxbits): return [x[::-1] for x in muxbits] # # print(x) # uniq_r_muxes = [] # for _ in range(8): # uniq_r_muxes.append(set()) # for X in range(2, 8): # for Y in range(1, 5): # for N in range(8): # mux = "R:X{}Y{}I{}".format(X, Y, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_r_muxes[N].add(bits2str(muxbits)) # # print(uniq_r_muxes) # for N in range(8): # print("~~~~~ R{} ~~~~~".format(N)) # for xx in sorted(list(uniq_r_muxes[N])): # print(xx) # # print(x) # uniq_l_muxes = [] # for _ in range(8): # uniq_l_muxes.append(set()) # # print(x) # uniq_l2_muxes = [] # for _ in range(8): # uniq_l2_muxes.append(set()) # for X in [8]: # for Y in range(1, 5): # for N in range(8): # mux = "L2:X{}Y{}I{}".format(X, Y, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_l2_muxes[N].add(bits2str(muxbits)) # # print(uniq_l2_muxes) # for N in range(8): # print("~~~~~ L2:{} ~~~~~".format(N)) # for xx in sorted(list(uniq_l2_muxes[N])): # print(xx) # # print(x) # uniq_l_muxes = [] # for _ in range(8): # uniq_l_muxes.append(set()) # for X in range(3, 9): # for Y in range(1, 5): # for N in range(8): # mux = "L:X{}Y{}I{}".format(X, Y, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_l_muxes[N].add(bits2str(muxbits)) # # print(uniq_l_muxes) # for N in range(8): # print("~~~~~ L{} ~~~~~".format(N)) # for xx in sorted(list(uniq_l_muxes[N])): # print(xx) # uniq_u_muxes = [] # for _ in range(7): # uniq_u_muxes.append(set()) # for X in [8]:#range(2, 8): # for Y in range(1, 5): # for N in range(7): # mux = "U:X{}Y{}I{}".format(X, Y, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_u_muxes[N].add(bits2str(muxbits)) # # print(uniq_r_muxes) # for N in range(7): # print("~~~~~ U{} ~~~~~".format(N)) # for xx in sorted(list(uniq_u_muxes[N])): # print(xx) # uniq_d_muxes = 
[] # for _ in range(7): # uniq_d_muxes.append(set()) # for X in [8]:#range(2, 8): # for Y in range(1, 5): # for N in range(7): # mux = "D:X{}Y{}I{}".format(X, Y, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_d_muxes[N].add(bits2str(muxbits)) # # print(uniq_r_muxes) # for N in range(7): # print("~~~~~ D{} ~~~~~".format(N)) # for xx in sorted(list(uniq_d_muxes[N])): # print(xx) # uniq_l_li_muxes = [] # for _ in range(18): # uniq_l_li_muxes.append(set()) # for Y in range(1, 5): # for N in range(18): # mux = "LOCAL_INTERCONNECT:X1Y{}S0I{}".format(Y, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_l_li_muxes[N].add(bits2str(muxbits)) # # print(uniq_r_muxes) # for N in range(18): # print("~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~".format(N)) # for xx in sorted(list(uniq_l_li_muxes[N])): # print(xx) # uniq_li_muxes = [] # for _ in range(26): # uniq_li_muxes.append(set()) # for X in range(2, 8): # for Y in range(1, 5): # for N in range(26): # mux = "LOCAL_INTERCONNECT:X{}Y{}S0I{}".format(X, Y, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_li_muxes[N].add(bits2str(muxbits)) # # print(uniq_r_muxes) # for N in range(26): # print("~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~".format(N)) # for xx in sorted(list(uniq_li_muxes[N])): # print(xx) # uniq_top_li_muxes = [] # for _ in range(10): # uniq_top_li_muxes.append(set()) # for X in range(2, 8): # for N in range(10): # mux = "LOCAL_INTERCONNECT:X{}Y5S0I{}".format(X, N) # muxvals = x[mux] # # print(muxvals) # for muxsrc, muxbits in muxvals.items(): # uniq_top_li_muxes[N].add(bits2str(muxbits)) # # print(uniq_r_muxes) # for N in range(10): # print("~~~~~ LOCAL_INTERCONNECT:Y5 {} ~~~~~".format(N)) # for xx in sorted(list(uniq_top_li_muxes[N])): # print(xx) LABELS = [ "|G|C|D|H|A|B|E|F|", "|0| | | | | | | | ", "| |0| | |0| | | | ", "| |0| | | |0| | | ", "| |0| | | | |0| | ", "| |0| | | | | |0| ", "| | |0| |0| | | | ", "| | |0| | |0| | | ", "| | |0| | | |0| | ", "| | |0| | | | |0| ", "| | | |0|0| | | | ", "| | | |0| |0| | | ", "| | | |0| | |0| | ", "| | | |0| | | |0| ", ] for dst, srcs in x.items(): srcs_decoded = [None] * 13 is_tb_io = False for src, muxbits in srcs.items(): if dst.startswith("R:"): _, _, I = parse_xyi(dst) if I >= 4: muxbits = flipv(muxbits) elif dst.startswith("L:") or dst.startswith("L2"): _, _, I = parse_xyi(dst) muxbits = fliph(muxbits) if I >= 4: muxbits = flipv(muxbits) elif dst.startswith("U:"): X, _, I = parse_xyi(dst) if X == 8: muxbits = fliph(muxbits) if I == 0 and X != 8: muxbits = fliph(muxbits) if I >= 4: muxbits = flipv(muxbits) elif dst.startswith("D:"): X, _, I = parse_xyi(dst) if X == 8: muxbits = fliph(muxbits) if I == 6 and X != 8: muxbits = fliph(muxbits) if I >= 3: muxbits = flipv(muxbits) elif dst.startswith("LOCAL_INTERCONNECT:"): X, Y, I = parse_xysi(dst[19:]) if X == 1: muxbits = fliph(muxbits) if I > 8: muxbits = flipv(muxbits) elif X == 8: if I > 8: muxbits = flipv(muxbits) else: if Y == 0 or Y == 5: is_tb_io = True if Y == 0: muxbits = flipv(muxbits) if I < 5: muxbits = fliph(muxbits) else: if I in range(0, 5) or I in range(13, 18): muxbits = fliph(muxbits) if I >= 13: muxbits = flipv(muxbits) else: continue muxidx = decodemux(muxbits) if srcs_decoded[muxidx] is not None: print(dst, src, srcs_decoded[muxidx]) assert srcs_decoded[muxidx] is None srcs_decoded[muxidx] = src print("~~~~~ {} ~~~~~".format(dst)) print(LABELS[0]) if is_tb_io: assert srcs_decoded[0] is None for i in 
range(len(srcs_decoded)): if is_tb_io and i == 0: continue print(LABELS[i + 1], end='') src = srcs_decoded[i] if src is None: print("???") else: print(src, end='') if src in wirenamemap: print(" ({})".format(wirenamemap[src])) else: print() # if dst.startswith("LOCAL_INTERCONNECT:"): # continue # print(dst, src) # if dst.startswith("L:"): # _, _, I = parse_xyi(dst) # muxbits = fliph(muxbits) # if I >= 4: # muxbits = flipv(muxbits) # if dst.startswith("R:"): # _, _, I = parse_xyi(dst) # if I >= 4: # muxbits = flipv(muxbits) # if dst.startswith("D:"): # X, _, I = parse_xyi(dst) # if I >= 3: # muxbits = flipv(muxbits) # if I == 6: # muxbits = fliph(muxbits) # if X == 8: # muxbits = fliph(muxbits) # if dst.startswith("U:"): # X, _, I = parse_xyi(dst) # if I >= 4: # muxbits = flipv(muxbits) # if I == 0: # muxbits = fliph(muxbits) # if X == 8: # muxbits = fliph(muxbits) # if dst.startswith("L2:"): # _, _, I = parse_xyi(dst) # if I >= 4: # muxbits = flipv(muxbits) # decodemux(muxbits)
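parse_xyi and parse_xysi in the script above locate the X/Y/I markers by hand with str.find and asserts; the same wire-name parsing can be written as one regular expression, which also rejects malformed names with a clear error. A sketch, using a wire name of the same shape as the script's inputs:

import re

_XYI = re.compile(r'X(\d+)Y(\d+)I(\d+)$')

def parse_xyi_re(name):
    # e.g. "R:X3Y2I5" -> (3, 2, 5)
    m = _XYI.search(name)
    if m is None:
        raise ValueError("not an X..Y..I.. wire name: " + name)
    return tuple(int(g) for g in m.groups())

print(parse_xyi_re("R:X3Y2I5"))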
normal
{ "blob_id": "95163a28a35cc88240d9d6edc2e9b416e5493909", "index": 6021, "step-1": "<mask token>\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\n<mask token>\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\n<mask token>\n", "step-3": "<mask token>\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\nprint('----- There are {} muxes in the database'.format(len(x)))\nprint('----- There are {} routing pairs in the database'.format(sum(len(v) for\n k, v in x.items())))\n\n\ndef bits2str(bits):\n 
ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\nLABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',\n '| |0| | |0| | | | ', '| |0| | | |0| | | ',\n '| |0| | | | |0| | ', '| |0| | | | | |0| ',\n '| | |0| |0| | | | ', '| | |0| | |0| | | ',\n '| | |0| | | |0| | ', '| | |0| | | | |0| ',\n '| | | |0|0| | | | ', '| | | |0| |0| | | ',\n '| | | |0| | |0| | ', '| | | |0| | | |0| ']\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith('R:'):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('L:') or dst.startswith('L2'):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('U:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('D:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith('LOCAL_INTERCONNECT:'):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n elif Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n muxidx = decodemux(muxbits)\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n print('~~~~~ {} ~~~~~'.format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print('???')\n else:\n print(src, end='')\n if src in wirenamemap:\n print(' 
({})'.format(wirenamemap[src]))\n else:\n print()\n", "step-4": "import json\nimport sys\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\nprint('----- There are {} muxes in the database'.format(len(x)))\nprint('----- There are {} routing pairs in the database'.format(sum(len(v) for\n k, v in x.items())))\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\nLABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',\n '| |0| | |0| | | | ', '| |0| | | |0| | | ',\n '| |0| | | | |0| | ', '| |0| | | | | |0| ',\n '| | |0| |0| | | | ', '| | |0| | |0| | | ',\n '| | |0| | | |0| | ', '| | |0| | | | |0| ',\n '| | | |0|0| | | | ', '| | | |0| |0| | | ',\n '| | | |0| | |0| | ', '| | | |0| | | |0| ']\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith('R:'):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('L:') or dst.startswith('L2'):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('U:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('D:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith('LOCAL_INTERCONNECT:'):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n elif Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n muxidx = decodemux(muxbits)\n if srcs_decoded[muxidx] is not None:\n print(dst, src, 
srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n print('~~~~~ {} ~~~~~'.format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print('???')\n else:\n print(src, end='')\n if src in wirenamemap:\n print(' ({})'.format(wirenamemap[src]))\n else:\n print()\n", "step-5": "import json\nimport sys\n\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\n\nprint(\"----- There are {} muxes in the database\".format(len(x)))\nprint(\"----- There are {} routing pairs in the database\".format(sum((len(v) for k, v in x.items()))))\n\ndef bits2str(bits):\n ret = \"\"\n for row in bits:\n rowstr = \"\"\n for bit in row:\n rowstr += \"1\" if bit else \"0\"\n ret += rowstr + '\\n'\n return ret\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n\n return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]))\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n\n return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]))\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or (A + B + E + F == 0 and G)\n if G:\n assert A + B + C + D + E + F + H == 0\n\n if G:\n return 0\n if C:\n if A: return 1\n if B: return 2\n if E: return 3\n if F: return 4\n if D:\n if A: return 5\n if B: return 6\n if E: return 7\n if F: return 8\n if H:\n if A: return 9\n if B: return 10\n if E: return 11\n if F: return 12\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n# # print(x)\n# uniq_r_muxes = []\n# for _ in range(8):\n# uniq_r_muxes.append(set())\n\n# for X in range(2, 8):\n# for Y in range(1, 5):\n# for N in range(8):\n# mux = \"R:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_r_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(8):\n# print(\"~~~~~ R{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_r_muxes[N])):\n# print(xx)\n\n# # print(x)\n# uniq_l_muxes = []\n# for _ in range(8):\n# uniq_l_muxes.append(set())\n\n# # print(x)\n# uniq_l2_muxes = []\n# for _ in range(8):\n# uniq_l2_muxes.append(set())\n\n# for X in [8]:\n# for Y in range(1, 5):\n# for N in range(8):\n# mux = \"L2:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l2_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_l2_muxes)\n# for N in range(8):\n# print(\"~~~~~ L2:{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l2_muxes[N])):\n# print(xx)\n\n# # print(x)\n# uniq_l_muxes = []\n# for _ in range(8):\n# uniq_l_muxes.append(set())\n\n# for X in range(3, 9):\n# for Y in range(1, 5):\n# for N in range(8):\n# mux = 
\"L:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_l_muxes)\n# for N in range(8):\n# print(\"~~~~~ L{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l_muxes[N])):\n# print(xx)\n\n# uniq_u_muxes = []\n# for _ in range(7):\n# uniq_u_muxes.append(set())\n\n# for X in [8]:#range(2, 8):\n# for Y in range(1, 5):\n# for N in range(7):\n# mux = \"U:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_u_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(7):\n# print(\"~~~~~ U{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_u_muxes[N])):\n# print(xx)\n\n# uniq_d_muxes = []\n# for _ in range(7):\n# uniq_d_muxes.append(set())\n\n# for X in [8]:#range(2, 8):\n# for Y in range(1, 5):\n# for N in range(7):\n# mux = \"D:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_d_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(7):\n# print(\"~~~~~ D{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_d_muxes[N])):\n# print(xx)\n\n# uniq_l_li_muxes = []\n# for _ in range(18):\n# uniq_l_li_muxes.append(set())\n\n# for Y in range(1, 5):\n# for N in range(18):\n# mux = \"LOCAL_INTERCONNECT:X1Y{}S0I{}\".format(Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(18):\n# print(\"~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l_li_muxes[N])):\n# print(xx)\n\n# uniq_li_muxes = []\n# for _ in range(26):\n# uniq_li_muxes.append(set())\n\n# for X in range(2, 8):\n# for Y in range(1, 5):\n# for N in range(26):\n# mux = \"LOCAL_INTERCONNECT:X{}Y{}S0I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(26):\n# print(\"~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_li_muxes[N])):\n# print(xx)\n\n# uniq_top_li_muxes = []\n# for _ in range(10):\n# uniq_top_li_muxes.append(set())\n\n# for X in range(2, 8):\n# for N in range(10):\n# mux = \"LOCAL_INTERCONNECT:X{}Y5S0I{}\".format(X, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_top_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(10):\n# print(\"~~~~~ LOCAL_INTERCONNECT:Y5 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_top_li_muxes[N])):\n# print(xx)\n\nLABELS = [\n \"|G|C|D|H|A|B|E|F|\",\n \"|0| | | | | | | | \",\n \"| |0| | |0| | | | \",\n \"| |0| | | |0| | | \",\n \"| |0| | | | |0| | \",\n \"| |0| | | | | |0| \",\n \"| | |0| |0| | | | \",\n \"| | |0| | |0| | | \",\n \"| | |0| | | |0| | \",\n \"| | |0| | | | |0| \",\n \"| | | |0|0| | | | \",\n \"| | | |0| |0| | | \",\n \"| | | |0| | |0| | \",\n \"| | | |0| | | |0| \",\n]\n\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith(\"R:\"):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"L:\") or dst.startswith(\"L2\"):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"U:\"):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = 
fliph(muxbits)\n\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"D:\"):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"LOCAL_INTERCONNECT:\"):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n else:\n if Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n\n muxidx = decodemux(muxbits)\n\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n\n print(\"~~~~~ {} ~~~~~\".format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print(\"???\")\n else:\n print(src, end='')\n if src in wirenamemap:\n print(\" ({})\".format(wirenamemap[src]))\n else:\n print()\n\n # if dst.startswith(\"LOCAL_INTERCONNECT:\"):\n # continue\n\n # print(dst, src)\n\n # if dst.startswith(\"L:\"):\n # _, _, I = parse_xyi(dst)\n # muxbits = fliph(muxbits)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if dst.startswith(\"R:\"):\n # _, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if dst.startswith(\"D:\"):\n # X, _, I = parse_xyi(dst)\n # if I >= 3:\n # muxbits = flipv(muxbits)\n # if I == 6:\n # muxbits = fliph(muxbits)\n # if X == 8:\n # muxbits = fliph(muxbits)\n # if dst.startswith(\"U:\"):\n # X, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if I == 0:\n # muxbits = fliph(muxbits)\n # if X == 8:\n # muxbits = fliph(muxbits)\n # if dst.startswith(\"L2:\"):\n # _, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n\n # decodemux(muxbits)\n", "step-ids": [ 6, 7, 9, 10, 11 ] }
[ 6, 7, 9, 10, 11 ]
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        # Create the height cache only once; re-initializing it on every
        # recursive call would discard the memoized heights and defeat the cache.
        if not hasattr(self, 'mem'):
            self.mem = dict()
        if root is None:
            return True

        leftH = self.getHeight(root.left)
        rightH = self.getHeight(root.right)

        return (
            abs(leftH - rightH) <= 1 and
            self.isBalanced(root.left) and
            self.isBalanced(root.right)
        )

    def getHeight(self, node):
        # Memoized height of the subtree rooted at node.
        if node in self.mem:
            return self.mem[node]
        if node is None:
            return 0

        h = max(self.getHeight(node.left), self.getHeight(node.right)) + 1
        self.mem[node] = h
        return h
normal
{ "blob_id": "9e98a361ef20049cba488b86ad06eb92b3d29d11", "index": 3584, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n <mask token>\n", "step-3": "class Solution:\n\n def isBalanced(self, root: TreeNode) ->bool:\n self.mem = dict()\n if root is None:\n return True\n leftH = self.getHeight(root.left)\n rightH = self.getHeight(root.right)\n return abs(leftH - rightH) <= 1 and self.isBalanced(root.left\n ) and self.isBalanced(root.right)\n <mask token>\n", "step-4": "class Solution:\n\n def isBalanced(self, root: TreeNode) ->bool:\n self.mem = dict()\n if root is None:\n return True\n leftH = self.getHeight(root.left)\n rightH = self.getHeight(root.right)\n return abs(leftH - rightH) <= 1 and self.isBalanced(root.left\n ) and self.isBalanced(root.right)\n\n def getHeight(self, node):\n if node in self.mem:\n return self.mem[node]\n if node is None:\n return 0\n h = max(self.getHeight(node.left), self.getHeight(node.right)) + 1\n self.mem[node] = h\n return h\n", "step-5": "# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isBalanced(self, root: TreeNode) -> bool:\n self.mem = dict()\n if root is None:\n return True\n \n leftH = self.getHeight(root.left)\n rightH = self.getHeight(root.right)\n \n return (\n abs(leftH-rightH) <= 1 and\n self.isBalanced(root.left) and\n self.isBalanced(root.right)\n )\n \n def getHeight(self, node):\n if node in self.mem:\n return self.mem[node]\n if node is None:\n return 0\n \n h = max(self.getHeight(node.left), self.getHeight(node.right)) + 1\n self.mem[node] = h\n return h\n ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
# @Project :experiment9
# @File :text1
# @Date :2020/10/28 09:13
# @Author :施嘉伟
# @Email :[email protected]
# @Software :PyCharm
-------------------------------------------------
"""
import urllib.request

# Send the request and get the response
response = urllib.request.urlopen("http://www.gengdan.cn/")
print(response.geturl())
# Decode the page and save it to a local HTML file
html = response.read().decode("UTF-8")
with open("bgdGW.html", 'w', encoding="utf-8") as fp:
    fp.write(html)
print(response.geturl())
normal
{ "blob_id": "b186ae7a48afbb70edf3be0d9697deed4f31e542", "index": 2258, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(response.geturl())\n<mask token>\nwith open('bgdGW.html', 'w', encoding='utf-8') as fp:\n fp.write(html)\nprint(response.geturl())\n", "step-3": "<mask token>\nresponse = urllib.request.urlopen('http://www.gengdan.cn/')\nprint(response.geturl())\nhtml = response.read().decode('UTF-8')\nwith open('bgdGW.html', 'w', encoding='utf-8') as fp:\n fp.write(html)\nprint(response.geturl())\n", "step-4": "<mask token>\nimport urllib.request\nresponse = urllib.request.urlopen('http://www.gengdan.cn/')\nprint(response.geturl())\nhtml = response.read().decode('UTF-8')\nwith open('bgdGW.html', 'w', encoding='utf-8') as fp:\n fp.write(html)\nprint(response.geturl())\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n# @Project :experiment9\n# @File :text1\n# @Date :2020/10/28 09:13\n# @Author :施嘉伟\n# @Email :[email protected]\n# @Software :PyCharm\n-------------------------------------------------\n\"\"\"\nimport urllib.request\n# 发出请求,得到响应\nresponse=urllib.request.urlopen(\"http://www.gengdan.cn/\")\nprint(response.geturl())\nhtml = response.read().decode(\"UTF-8\")\nwith open(\"bgdGW.html\",'w',encoding=\"utf-8\")as fp:\n fp.write(html)\nprint(response.geturl())\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
def filter_long_words(word_lng, words_list): return [word for word in words_list if len(word) > word_lng] assert filter_long_words(5, ['piwo', 'wino', 'czasopisma', 'ubrania', 'napoje'] ) == ['czasopisma', 'ubrania', 'napoje']
normal
{ "blob_id": "e221b840239b6e9af735238760fd1157f333c1a4", "index": 9014, "step-1": "<mask token>\n", "step-2": "def filter_long_words(word_lng, words_list):\n return [word for word in words_list if len(word) > word_lng]\n\n\n<mask token>\n", "step-3": "def filter_long_words(word_lng, words_list):\n return [word for word in words_list if len(word) > word_lng]\n\n\nassert filter_long_words(5, ['piwo', 'wino', 'czasopisma', 'ubrania', 'napoje']\n ) == ['czasopisma', 'ubrania', 'napoje']\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# *Using Min & Max Exercise def extremes(nums): return (max(nums), min(nums))
normal
{ "blob_id": "0577c274672bac333500535f21f568ade62100c7", "index": 3580, "step-1": "<mask token>\n", "step-2": "def extremes(nums):\n return max(nums), min(nums)\n", "step-3": "\n# *Using Min & Max Exercise\ndef extremes(nums):\n return (max(nums), min(nums))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# Generated by Django 2.2.2 on 2021-01-23 04:11 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('task', '0022_taskrecycle_create_date'), ] operations = [ migrations.RemoveField( model_name='ansibleextravars', name='playbook', ), migrations.RemoveField( model_name='ansibleplaybook', name='project', ), migrations.DeleteModel( name='CrontabTask', ), migrations.DeleteModel( name='TaskHistory', ), migrations.DeleteModel( name='TaskRecycle', ), migrations.RemoveField( model_name='taskscript', name='project', ), migrations.DeleteModel( name='AnsibleExtravars', ), migrations.DeleteModel( name='AnsiblePlaybook', ), migrations.DeleteModel( name='AnsibleProject', ), migrations.DeleteModel( name='TaskProject', ), migrations.DeleteModel( name='TaskScript', ), ]
normal
{ "blob_id": "d5beff74e3746c77cbaf6b8233b822ed1a86701e", "index": 316, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('task', '0022_taskrecycle_create_date')]\n operations = [migrations.RemoveField(model_name='ansibleextravars',\n name='playbook'), migrations.RemoveField(model_name=\n 'ansibleplaybook', name='project'), migrations.DeleteModel(name=\n 'CrontabTask'), migrations.DeleteModel(name='TaskHistory'),\n migrations.DeleteModel(name='TaskRecycle'), migrations.RemoveField(\n model_name='taskscript', name='project'), migrations.DeleteModel(\n name='AnsibleExtravars'), migrations.DeleteModel(name=\n 'AnsiblePlaybook'), migrations.DeleteModel(name='AnsibleProject'),\n migrations.DeleteModel(name='TaskProject'), migrations.DeleteModel(\n name='TaskScript')]\n", "step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('task', '0022_taskrecycle_create_date')]\n operations = [migrations.RemoveField(model_name='ansibleextravars',\n name='playbook'), migrations.RemoveField(model_name=\n 'ansibleplaybook', name='project'), migrations.DeleteModel(name=\n 'CrontabTask'), migrations.DeleteModel(name='TaskHistory'),\n migrations.DeleteModel(name='TaskRecycle'), migrations.RemoveField(\n model_name='taskscript', name='project'), migrations.DeleteModel(\n name='AnsibleExtravars'), migrations.DeleteModel(name=\n 'AnsiblePlaybook'), migrations.DeleteModel(name='AnsibleProject'),\n migrations.DeleteModel(name='TaskProject'), migrations.DeleteModel(\n name='TaskScript')]\n", "step-5": "# Generated by Django 2.2.2 on 2021-01-23 04:11\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('task', '0022_taskrecycle_create_date'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='ansibleextravars',\n name='playbook',\n ),\n migrations.RemoveField(\n model_name='ansibleplaybook',\n name='project',\n ),\n migrations.DeleteModel(\n name='CrontabTask',\n ),\n migrations.DeleteModel(\n name='TaskHistory',\n ),\n migrations.DeleteModel(\n name='TaskRecycle',\n ),\n migrations.RemoveField(\n model_name='taskscript',\n name='project',\n ),\n migrations.DeleteModel(\n name='AnsibleExtravars',\n ),\n migrations.DeleteModel(\n name='AnsiblePlaybook',\n ),\n migrations.DeleteModel(\n name='AnsibleProject',\n ),\n migrations.DeleteModel(\n name='TaskProject',\n ),\n migrations.DeleteModel(\n name='TaskScript',\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import urlparse import twitter import oauth2 as oauth import re import urllib url_checker = dict() def twitter_auth(): consumer_key = 'IqsuEo5xfTdWwjD1GZNSA' consumer_secret = 'dtYmqEekw53kia3MJhvDagdByWGxuTiqJfcdGkXw8A' request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url = 'http://api.twitter.com/oauth/access_token' authorize_url = 'http://api.twitter.com/oauth/authorize' #consumer = oauth.Consumer(consumer_key, consumer_secret) #client = oauth.Client(consumer) #resp, content = client.request(request_token_url, "GET") #request_token = dict(urlparse.parse_qsl(content)) #access_token_key = request_token['oauth_token'] #access_token_secret = request_token['oauth_token_secret'] api = twitter.Api(consumer_key,consumer_secret,'36001624-5JrcK4i6UO69IFY6vxZdRYxKBqjB42mwjhoSzzSP6','jgKWIncNLnzBvvhFeVTE0lkMGi1PH222YCEHSZHY') return api def twitter_pull(api,word): results = api.GetSearch(word,None,None,100) tweets = list() for result in results: tweets.append(result) return tweets def twitter_extract_urls(api,tweets): pattern=re.compile('([a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}/*\S*)?$') urls=list() for tweet in tweets: found_urls = pattern.findall(tweet.text) found_urls = map(lambda x: x.strip("?.()[]{}!@#$^&*;'.,"), found_urls) urls.append(found_urls) urls = filter(lambda x: x,urls) return urls def url_follow(url): if url_checker.has_key(url): return url_checker.get(url) try: r1 = urllib.urlopen('http://'+url) url_checker.update({url:r1.geturl()}) return r1.geturl() except: pass def unique_urls(urls): new_urls=list() for url in urls: new_urls.append(url_follow(url[0])) new_urls=filter(None,new_urls) url_dictionary = [{ "url": url, "count": new_urls.count(url)} for url in set(new_urls)] return url_dictionary def compile_twitter_content(tweets,url_data): content = list() for x in range(0,2): tweet = tweets[x] content.append({'type':'tweet','data':tweet,'score':3}) for url in url_data: content.append({'type':'url','data':url['url'],'score':url['count']}) return content def twitter_similar_terms(tweets): stop_words=["a","i","it","am","at","on","in","of","to","is","so","too","my","the","and","but","are","very","here","even","from","them","then","than","this","that","though"] whole_text='' for tweet in tweets: whole_text += (tweet.text) whole_text = whole_text.split() whole_text_list=list() for word in whole_text: if not word in stop_words: whole_text_list.append(word) whole_text_dictionary = [{"word": word, "count": whole_text_list.count(word)} for word in set(whole_text_list)] def get_twitter_content(term): api = twitter_auth() tweets = twitter_pull(api, term) urls = twitter_extract_urls(api,tweets) url_data = unique_urls(urls) twitter_similar_terms(tweets) return compile_twitter_content(tweets,url_data)
normal
{ "blob_id": "afa20d7e9c7843a03090c00cc888d44a77fc29f3", "index": 9205, "step-1": "<mask token>\n\n\ndef twitter_auth():\n consumer_key = 'IqsuEo5xfTdWwjD1GZNSA'\n consumer_secret = 'dtYmqEekw53kia3MJhvDagdByWGxuTiqJfcdGkXw8A'\n request_token_url = 'https://api.twitter.com/oauth/request_token'\n access_token_url = 'http://api.twitter.com/oauth/access_token'\n authorize_url = 'http://api.twitter.com/oauth/authorize'\n api = twitter.Api(consumer_key, consumer_secret,\n '36001624-5JrcK4i6UO69IFY6vxZdRYxKBqjB42mwjhoSzzSP6',\n 'jgKWIncNLnzBvvhFeVTE0lkMGi1PH222YCEHSZHY')\n return api\n\n\ndef twitter_pull(api, word):\n results = api.GetSearch(word, None, None, 100)\n tweets = list()\n for result in results:\n tweets.append(result)\n return tweets\n\n\ndef twitter_extract_urls(api, tweets):\n pattern = re.compile('([a-zA-Z0-9\\\\-\\\\.]+\\\\.[a-zA-Z]{2,3}/*\\\\S*)?$')\n urls = list()\n for tweet in tweets:\n found_urls = pattern.findall(tweet.text)\n found_urls = map(lambda x: x.strip(\"?.()[]{}!@#$^&*;'.,\"), found_urls)\n urls.append(found_urls)\n urls = filter(lambda x: x, urls)\n return urls\n\n\ndef url_follow(url):\n if url_checker.has_key(url):\n return url_checker.get(url)\n try:\n r1 = urllib.urlopen('http://' + url)\n url_checker.update({url: r1.geturl()})\n return r1.geturl()\n except:\n pass\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef twitter_auth():\n consumer_key = 'IqsuEo5xfTdWwjD1GZNSA'\n consumer_secret = 'dtYmqEekw53kia3MJhvDagdByWGxuTiqJfcdGkXw8A'\n request_token_url = 'https://api.twitter.com/oauth/request_token'\n access_token_url = 'http://api.twitter.com/oauth/access_token'\n authorize_url = 'http://api.twitter.com/oauth/authorize'\n api = twitter.Api(consumer_key, consumer_secret,\n '36001624-5JrcK4i6UO69IFY6vxZdRYxKBqjB42mwjhoSzzSP6',\n 'jgKWIncNLnzBvvhFeVTE0lkMGi1PH222YCEHSZHY')\n return api\n\n\ndef twitter_pull(api, word):\n results = api.GetSearch(word, None, None, 100)\n tweets = list()\n for result in results:\n tweets.append(result)\n return tweets\n\n\ndef twitter_extract_urls(api, tweets):\n pattern = re.compile('([a-zA-Z0-9\\\\-\\\\.]+\\\\.[a-zA-Z]{2,3}/*\\\\S*)?$')\n urls = list()\n for tweet in tweets:\n found_urls = pattern.findall(tweet.text)\n found_urls = map(lambda x: x.strip(\"?.()[]{}!@#$^&*;'.,\"), found_urls)\n urls.append(found_urls)\n urls = filter(lambda x: x, urls)\n return urls\n\n\ndef url_follow(url):\n if url_checker.has_key(url):\n return url_checker.get(url)\n try:\n r1 = urllib.urlopen('http://' + url)\n url_checker.update({url: r1.geturl()})\n return r1.geturl()\n except:\n pass\n\n\n<mask token>\n\n\ndef compile_twitter_content(tweets, url_data):\n content = list()\n for x in range(0, 2):\n tweet = tweets[x]\n content.append({'type': 'tweet', 'data': tweet, 'score': 3})\n for url in url_data:\n content.append({'type': 'url', 'data': url['url'], 'score': url[\n 'count']})\n return content\n\n\n<mask token>\n\n\ndef get_twitter_content(term):\n api = twitter_auth()\n tweets = twitter_pull(api, term)\n urls = twitter_extract_urls(api, tweets)\n url_data = unique_urls(urls)\n twitter_similar_terms(tweets)\n return compile_twitter_content(tweets, url_data)\n", "step-3": "<mask token>\n\n\ndef twitter_auth():\n consumer_key = 'IqsuEo5xfTdWwjD1GZNSA'\n consumer_secret = 'dtYmqEekw53kia3MJhvDagdByWGxuTiqJfcdGkXw8A'\n request_token_url = 'https://api.twitter.com/oauth/request_token'\n access_token_url = 'http://api.twitter.com/oauth/access_token'\n authorize_url = 'http://api.twitter.com/oauth/authorize'\n api = 
twitter.Api(consumer_key, consumer_secret,\n '36001624-5JrcK4i6UO69IFY6vxZdRYxKBqjB42mwjhoSzzSP6',\n 'jgKWIncNLnzBvvhFeVTE0lkMGi1PH222YCEHSZHY')\n return api\n\n\ndef twitter_pull(api, word):\n results = api.GetSearch(word, None, None, 100)\n tweets = list()\n for result in results:\n tweets.append(result)\n return tweets\n\n\ndef twitter_extract_urls(api, tweets):\n pattern = re.compile('([a-zA-Z0-9\\\\-\\\\.]+\\\\.[a-zA-Z]{2,3}/*\\\\S*)?$')\n urls = list()\n for tweet in tweets:\n found_urls = pattern.findall(tweet.text)\n found_urls = map(lambda x: x.strip(\"?.()[]{}!@#$^&*;'.,\"), found_urls)\n urls.append(found_urls)\n urls = filter(lambda x: x, urls)\n return urls\n\n\ndef url_follow(url):\n if url_checker.has_key(url):\n return url_checker.get(url)\n try:\n r1 = urllib.urlopen('http://' + url)\n url_checker.update({url: r1.geturl()})\n return r1.geturl()\n except:\n pass\n\n\ndef unique_urls(urls):\n new_urls = list()\n for url in urls:\n new_urls.append(url_follow(url[0]))\n new_urls = filter(None, new_urls)\n url_dictionary = [{'url': url, 'count': new_urls.count(url)} for url in\n set(new_urls)]\n return url_dictionary\n\n\ndef compile_twitter_content(tweets, url_data):\n content = list()\n for x in range(0, 2):\n tweet = tweets[x]\n content.append({'type': 'tweet', 'data': tweet, 'score': 3})\n for url in url_data:\n content.append({'type': 'url', 'data': url['url'], 'score': url[\n 'count']})\n return content\n\n\ndef twitter_similar_terms(tweets):\n stop_words = ['a', 'i', 'it', 'am', 'at', 'on', 'in', 'of', 'to', 'is',\n 'so', 'too', 'my', 'the', 'and', 'but', 'are', 'very', 'here',\n 'even', 'from', 'them', 'then', 'than', 'this', 'that', 'though']\n whole_text = ''\n for tweet in tweets:\n whole_text += tweet.text\n whole_text = whole_text.split()\n whole_text_list = list()\n for word in whole_text:\n if not word in stop_words:\n whole_text_list.append(word)\n whole_text_dictionary = [{'word': word, 'count': whole_text_list.count(\n word)} for word in set(whole_text_list)]\n\n\ndef get_twitter_content(term):\n api = twitter_auth()\n tweets = twitter_pull(api, term)\n urls = twitter_extract_urls(api, tweets)\n url_data = unique_urls(urls)\n twitter_similar_terms(tweets)\n return compile_twitter_content(tweets, url_data)\n", "step-4": "import urlparse\nimport twitter\nimport oauth2 as oauth\nimport re\nimport urllib\nurl_checker = dict()\n\n\ndef twitter_auth():\n consumer_key = 'IqsuEo5xfTdWwjD1GZNSA'\n consumer_secret = 'dtYmqEekw53kia3MJhvDagdByWGxuTiqJfcdGkXw8A'\n request_token_url = 'https://api.twitter.com/oauth/request_token'\n access_token_url = 'http://api.twitter.com/oauth/access_token'\n authorize_url = 'http://api.twitter.com/oauth/authorize'\n api = twitter.Api(consumer_key, consumer_secret,\n '36001624-5JrcK4i6UO69IFY6vxZdRYxKBqjB42mwjhoSzzSP6',\n 'jgKWIncNLnzBvvhFeVTE0lkMGi1PH222YCEHSZHY')\n return api\n\n\ndef twitter_pull(api, word):\n results = api.GetSearch(word, None, None, 100)\n tweets = list()\n for result in results:\n tweets.append(result)\n return tweets\n\n\ndef twitter_extract_urls(api, tweets):\n pattern = re.compile('([a-zA-Z0-9\\\\-\\\\.]+\\\\.[a-zA-Z]{2,3}/*\\\\S*)?$')\n urls = list()\n for tweet in tweets:\n found_urls = pattern.findall(tweet.text)\n found_urls = map(lambda x: x.strip(\"?.()[]{}!@#$^&*;'.,\"), found_urls)\n urls.append(found_urls)\n urls = filter(lambda x: x, urls)\n return urls\n\n\ndef url_follow(url):\n if url_checker.has_key(url):\n return url_checker.get(url)\n try:\n r1 = urllib.urlopen('http://' + url)\n 
url_checker.update({url: r1.geturl()})\n return r1.geturl()\n except:\n pass\n\n\ndef unique_urls(urls):\n new_urls = list()\n for url in urls:\n new_urls.append(url_follow(url[0]))\n new_urls = filter(None, new_urls)\n url_dictionary = [{'url': url, 'count': new_urls.count(url)} for url in\n set(new_urls)]\n return url_dictionary\n\n\ndef compile_twitter_content(tweets, url_data):\n content = list()\n for x in range(0, 2):\n tweet = tweets[x]\n content.append({'type': 'tweet', 'data': tweet, 'score': 3})\n for url in url_data:\n content.append({'type': 'url', 'data': url['url'], 'score': url[\n 'count']})\n return content\n\n\ndef twitter_similar_terms(tweets):\n stop_words = ['a', 'i', 'it', 'am', 'at', 'on', 'in', 'of', 'to', 'is',\n 'so', 'too', 'my', 'the', 'and', 'but', 'are', 'very', 'here',\n 'even', 'from', 'them', 'then', 'than', 'this', 'that', 'though']\n whole_text = ''\n for tweet in tweets:\n whole_text += tweet.text\n whole_text = whole_text.split()\n whole_text_list = list()\n for word in whole_text:\n if not word in stop_words:\n whole_text_list.append(word)\n whole_text_dictionary = [{'word': word, 'count': whole_text_list.count(\n word)} for word in set(whole_text_list)]\n\n\ndef get_twitter_content(term):\n api = twitter_auth()\n tweets = twitter_pull(api, term)\n urls = twitter_extract_urls(api, tweets)\n url_data = unique_urls(urls)\n twitter_similar_terms(tweets)\n return compile_twitter_content(tweets, url_data)\n", "step-5": "import urlparse\nimport twitter\nimport oauth2 as oauth\nimport re\nimport urllib \n\n\nurl_checker = dict()\ndef twitter_auth():\n consumer_key = 'IqsuEo5xfTdWwjD1GZNSA'\n consumer_secret = 'dtYmqEekw53kia3MJhvDagdByWGxuTiqJfcdGkXw8A'\n\n request_token_url = 'https://api.twitter.com/oauth/request_token'\n access_token_url = 'http://api.twitter.com/oauth/access_token'\n authorize_url = 'http://api.twitter.com/oauth/authorize'\n\n #consumer = oauth.Consumer(consumer_key, consumer_secret)\n #client = oauth.Client(consumer)\n #resp, content = client.request(request_token_url, \"GET\")\n #request_token = dict(urlparse.parse_qsl(content))\n\n #access_token_key = request_token['oauth_token']\n #access_token_secret = request_token['oauth_token_secret']\n\n api = twitter.Api(consumer_key,consumer_secret,'36001624-5JrcK4i6UO69IFY6vxZdRYxKBqjB42mwjhoSzzSP6','jgKWIncNLnzBvvhFeVTE0lkMGi1PH222YCEHSZHY')\n\n return api\n\n\ndef twitter_pull(api,word):\n results = api.GetSearch(word,None,None,100)\n tweets = list()\n for result in results:\n tweets.append(result)\n return tweets\n\ndef twitter_extract_urls(api,tweets):\n pattern=re.compile('([a-zA-Z0-9\\-\\.]+\\.[a-zA-Z]{2,3}/*\\S*)?$')\n urls=list()\n for tweet in tweets:\n found_urls = pattern.findall(tweet.text)\n found_urls = map(lambda x: x.strip(\"?.()[]{}!@#$^&*;'.,\"), found_urls)\n urls.append(found_urls)\n urls = filter(lambda x: x,urls)\n return urls\n\ndef url_follow(url):\n if url_checker.has_key(url):\n return url_checker.get(url)\n try:\n r1 = urllib.urlopen('http://'+url)\n url_checker.update({url:r1.geturl()})\n return r1.geturl()\n except:\n pass\n\ndef unique_urls(urls):\n new_urls=list()\n for url in urls:\n new_urls.append(url_follow(url[0]))\n new_urls=filter(None,new_urls)\n url_dictionary = [{ \"url\": url, \"count\": new_urls.count(url)} for url in set(new_urls)]\n return url_dictionary \n\ndef compile_twitter_content(tweets,url_data):\n content = list()\n for x in range(0,2):\n tweet = tweets[x]\n content.append({'type':'tweet','data':tweet,'score':3})\n for url in url_data:\n 
content.append({'type':'url','data':url['url'],'score':url['count']})\n return content\n\ndef twitter_similar_terms(tweets):\n stop_words=[\"a\",\"i\",\"it\",\"am\",\"at\",\"on\",\"in\",\"of\",\"to\",\"is\",\"so\",\"too\",\"my\",\"the\",\"and\",\"but\",\"are\",\"very\",\"here\",\"even\",\"from\",\"them\",\"then\",\"than\",\"this\",\"that\",\"though\"]\n whole_text=''\n for tweet in tweets:\n whole_text += (tweet.text)\n whole_text = whole_text.split()\n whole_text_list=list()\n for word in whole_text:\n if not word in stop_words:\n whole_text_list.append(word)\n whole_text_dictionary = [{\"word\": word, \"count\": whole_text_list.count(word)} for word in set(whole_text_list)]\n\ndef get_twitter_content(term):\n api = twitter_auth()\n tweets = twitter_pull(api, term)\n urls = twitter_extract_urls(api,tweets)\n url_data = unique_urls(urls)\n twitter_similar_terms(tweets)\n return compile_twitter_content(tweets,url_data)\n\n", "step-ids": [ 4, 6, 8, 10, 11 ] }
[ 4, 6, 8, 10, 11 ]
import simple_map import pickle import os import argparse import cv2 argparser = argparse.ArgumentParser() argparser.add_argument("--src", type=str, required=True, help="source directory") argparser.add_argument("--dst", type=str, required=True, help="destination directory") argparser.add_argument("--ref", type=str, required=False, default="train_raw", help="global reference directory (default: train_raw)") args = argparser.parse_args() def get_reference(): json = sorted([os.path.join(args.ref, file) for file in os.listdir(args.ref) if file.endswith(".json")])[0] smap = simple_map.SimpleMap(json) return smap.northing, smap.easting def construct_maps(jsons): cnt = 0 # get first map as reference ref_globals = get_reference() for i in range(len(jsons)): smap = simple_map.SimpleMap(jsons[i], ref_globals) (x, y), (x_real, y_real), imgs = smap.get_route() # resize image imgs = [tuple(map(lambda x: cv2.resize(x, None, fx=0.2, fy=0.2), img)) for img in imgs] for j in range(0, len(imgs), 10): for k in range(3): cnt += 1 path = os.path.join(args.dst, str(cnt)) output_file = open(path, 'wb') obj = {"x_steer": x[j], "y_steer": y[j], "x_utm": x_real[j], "y_utm": y_real[j], "img": imgs[j][k]} pickle.dump(obj, output_file) output_file.close() print("* Video %d done, %s" %( i, jsons[i])) def main(): jsons = sorted([os.path.join(args.src, file) for file in os.listdir(args.src) if file.endswith(".json")]) construct_maps(jsons) if __name__ == "__main__": main()
normal
{ "blob_id": "a8c59f97501b3f9db30c98e334dbfcffffe7accd", "index": 6557, "step-1": "<mask token>\n\n\ndef get_reference():\n json = sorted([os.path.join(args.ref, file) for file in os.listdir(args\n .ref) if file.endswith('.json')])[0]\n smap = simple_map.SimpleMap(json)\n return smap.northing, smap.easting\n\n\n<mask token>\n\n\ndef main():\n jsons = sorted([os.path.join(args.src, file) for file in os.listdir(\n args.src) if file.endswith('.json')])\n construct_maps(jsons)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_reference():\n json = sorted([os.path.join(args.ref, file) for file in os.listdir(args\n .ref) if file.endswith('.json')])[0]\n smap = simple_map.SimpleMap(json)\n return smap.northing, smap.easting\n\n\ndef construct_maps(jsons):\n cnt = 0\n ref_globals = get_reference()\n for i in range(len(jsons)):\n smap = simple_map.SimpleMap(jsons[i], ref_globals)\n (x, y), (x_real, y_real), imgs = smap.get_route()\n imgs = [tuple(map(lambda x: cv2.resize(x, None, fx=0.2, fy=0.2),\n img)) for img in imgs]\n for j in range(0, len(imgs), 10):\n for k in range(3):\n cnt += 1\n path = os.path.join(args.dst, str(cnt))\n output_file = open(path, 'wb')\n obj = {'x_steer': x[j], 'y_steer': y[j], 'x_utm': x_real[j],\n 'y_utm': y_real[j], 'img': imgs[j][k]}\n pickle.dump(obj, output_file)\n output_file.close()\n print('* Video %d done, %s' % (i, jsons[i]))\n\n\ndef main():\n jsons = sorted([os.path.join(args.src, file) for file in os.listdir(\n args.src) if file.endswith('.json')])\n construct_maps(jsons)\n\n\n<mask token>\n", "step-3": "<mask token>\nargparser = argparse.ArgumentParser()\nargparser.add_argument('--src', type=str, required=True, help=\n 'source directory')\nargparser.add_argument('--dst', type=str, required=True, help=\n 'destination directory')\nargparser.add_argument('--ref', type=str, required=False, default=\n 'train_raw', help='global reference directory (default: train_raw)')\nargs = argparser.parse_args()\n\n\ndef get_reference():\n json = sorted([os.path.join(args.ref, file) for file in os.listdir(args\n .ref) if file.endswith('.json')])[0]\n smap = simple_map.SimpleMap(json)\n return smap.northing, smap.easting\n\n\ndef construct_maps(jsons):\n cnt = 0\n ref_globals = get_reference()\n for i in range(len(jsons)):\n smap = simple_map.SimpleMap(jsons[i], ref_globals)\n (x, y), (x_real, y_real), imgs = smap.get_route()\n imgs = [tuple(map(lambda x: cv2.resize(x, None, fx=0.2, fy=0.2),\n img)) for img in imgs]\n for j in range(0, len(imgs), 10):\n for k in range(3):\n cnt += 1\n path = os.path.join(args.dst, str(cnt))\n output_file = open(path, 'wb')\n obj = {'x_steer': x[j], 'y_steer': y[j], 'x_utm': x_real[j],\n 'y_utm': y_real[j], 'img': imgs[j][k]}\n pickle.dump(obj, output_file)\n output_file.close()\n print('* Video %d done, %s' % (i, jsons[i]))\n\n\ndef main():\n jsons = sorted([os.path.join(args.src, file) for file in os.listdir(\n args.src) if file.endswith('.json')])\n construct_maps(jsons)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import simple_map\nimport pickle\nimport os\nimport argparse\nimport cv2\nargparser = argparse.ArgumentParser()\nargparser.add_argument('--src', type=str, required=True, help=\n 'source directory')\nargparser.add_argument('--dst', type=str, required=True, help=\n 'destination directory')\nargparser.add_argument('--ref', type=str, required=False, default=\n 'train_raw', help='global reference directory (default: train_raw)')\nargs = argparser.parse_args()\n\n\ndef get_reference():\n json = 
sorted([os.path.join(args.ref, file) for file in os.listdir(args\n .ref) if file.endswith('.json')])[0]\n smap = simple_map.SimpleMap(json)\n return smap.northing, smap.easting\n\n\ndef construct_maps(jsons):\n cnt = 0\n ref_globals = get_reference()\n for i in range(len(jsons)):\n smap = simple_map.SimpleMap(jsons[i], ref_globals)\n (x, y), (x_real, y_real), imgs = smap.get_route()\n imgs = [tuple(map(lambda x: cv2.resize(x, None, fx=0.2, fy=0.2),\n img)) for img in imgs]\n for j in range(0, len(imgs), 10):\n for k in range(3):\n cnt += 1\n path = os.path.join(args.dst, str(cnt))\n output_file = open(path, 'wb')\n obj = {'x_steer': x[j], 'y_steer': y[j], 'x_utm': x_real[j],\n 'y_utm': y_real[j], 'img': imgs[j][k]}\n pickle.dump(obj, output_file)\n output_file.close()\n print('* Video %d done, %s' % (i, jsons[i]))\n\n\ndef main():\n jsons = sorted([os.path.join(args.src, file) for file in os.listdir(\n args.src) if file.endswith('.json')])\n construct_maps(jsons)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import simple_map\nimport pickle\nimport os\nimport argparse\nimport cv2\n\nargparser = argparse.ArgumentParser()\n\nargparser.add_argument(\"--src\", type=str, required=True,\n help=\"source directory\")\nargparser.add_argument(\"--dst\", type=str, required=True,\n help=\"destination directory\")\nargparser.add_argument(\"--ref\", type=str, required=False, default=\"train_raw\", \n help=\"global reference directory (default: train_raw)\")\nargs = argparser.parse_args()\n\n\ndef get_reference():\n json = sorted([os.path.join(args.ref, file) for file in os.listdir(args.ref) if file.endswith(\".json\")])[0]\n smap = simple_map.SimpleMap(json)\n return smap.northing, smap.easting\n\ndef construct_maps(jsons):\n cnt = 0\n\n # get first map as reference\n ref_globals = get_reference()\n \n for i in range(len(jsons)):\n smap = simple_map.SimpleMap(jsons[i], ref_globals)\n (x, y), (x_real, y_real), imgs = smap.get_route()\n\n # resize image\n imgs = [tuple(map(lambda x: cv2.resize(x, None, fx=0.2, fy=0.2), img)) for img in imgs]\n\n for j in range(0, len(imgs), 10):\n for k in range(3):\n cnt += 1\n path = os.path.join(args.dst, str(cnt))\n output_file = open(path, 'wb')\n obj = {\"x_steer\": x[j], \"y_steer\": y[j],\n \"x_utm\": x_real[j], \"y_utm\": y_real[j],\n \"img\": imgs[j][k]}\n pickle.dump(obj, output_file)\n output_file.close()\n\n print(\"* Video %d done, %s\" %( i, jsons[i]))\n\n\ndef main():\n jsons = sorted([os.path.join(args.src, file) for file in os.listdir(args.src) if file.endswith(\".json\")])\n construct_maps(jsons)\n\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
""" 定义函数,根据年、月、日计算星期。 0 星期一 1 星期二 .... """ import time def get_week(year, month, day): str_time = "%d-%d-%d" % (year, month, day) time_tuple = time.strptime(str_time, "%Y-%m-%d") tuple_week = ("星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日") return tuple_week[time_tuple[6]] print(get_week(2020, 1, 16))
normal
{ "blob_id": "012d9b5aa13c557ad958343cadf935b73c808a56", "index": 4535, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get_week(year, month, day):\n str_time = '%d-%d-%d' % (year, month, day)\n time_tuple = time.strptime(str_time, '%Y-%m-%d')\n tuple_week = '星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日'\n return tuple_week[time_tuple[6]]\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_week(year, month, day):\n str_time = '%d-%d-%d' % (year, month, day)\n time_tuple = time.strptime(str_time, '%Y-%m-%d')\n tuple_week = '星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日'\n return tuple_week[time_tuple[6]]\n\n\nprint(get_week(2020, 1, 16))\n", "step-4": "<mask token>\nimport time\n\n\ndef get_week(year, month, day):\n str_time = '%d-%d-%d' % (year, month, day)\n time_tuple = time.strptime(str_time, '%Y-%m-%d')\n tuple_week = '星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日'\n return tuple_week[time_tuple[6]]\n\n\nprint(get_week(2020, 1, 16))\n", "step-5": "\"\"\"\n 定义函数,根据年、月、日计算星期。\n 0 星期一\n 1 星期二\n ....\n\"\"\"\nimport time\n\n\ndef get_week(year, month, day):\n str_time = \"%d-%d-%d\" % (year, month, day)\n time_tuple = time.strptime(str_time, \"%Y-%m-%d\")\n tuple_week = (\"星期一\", \"星期二\", \"星期三\", \"星期四\", \"星期五\", \"星期六\", \"星期日\")\n return tuple_week[time_tuple[6]]\n\n\nprint(get_week(2020, 1, 16))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
class String: def reverse(self, s): return s[::-1] s = input() obj1 = String() print(obj1.reverse(s))
normal
{ "blob_id": "c27c29a5b4be9f710e4036f7f73a89c7d20acea5", "index": 4317, "step-1": "class String:\n <mask token>\n\n\n<mask token>\n", "step-2": "class String:\n\n def reverse(self, s):\n return s[::-1]\n\n\n<mask token>\n", "step-3": "class String:\n\n def reverse(self, s):\n return s[::-1]\n\n\n<mask token>\nprint(obj1.reverse(s))\n", "step-4": "class String:\n\n def reverse(self, s):\n return s[::-1]\n\n\ns = input()\nobj1 = String()\nprint(obj1.reverse(s))\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
# coding: utf-8 from __future__ import division, unicode_literals import unittest from monty.inspect import * class LittleCatA(object): pass class LittleCatB(LittleCatA): pass class LittleCatC(object): pass class LittleCatD(LittleCatB): pass class InspectTest(unittest.TestCase): def test_func(self): # Not a real test. Need something better. self.assertTrue(find_top_pyfile()) self.assertTrue(caller_name()) def test_all_subclasses(self): self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD]) if __name__ == "__main__": unittest.main()
normal
{ "blob_id": "89605ff723d2f78e85cae458d576494718b5d456", "index": 1193, "step-1": "<mask token>\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass LittleCatB(LittleCatA):\n pass\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\n<mask token>\n", "step-4": "from __future__ import division, unicode_literals\nimport unittest\nfrom monty.inspect import *\n\n\nclass LittleCatA(object):\n pass\n\n\nclass LittleCatB(LittleCatA):\n pass\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "# coding: utf-8\nfrom __future__ import division, unicode_literals\n\nimport unittest\n\nfrom monty.inspect import *\n\nclass LittleCatA(object):\n pass\n\nclass LittleCatB(LittleCatA):\n pass\n\nclass LittleCatC(object):\n pass\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n # Not a real test. Need something better.\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "step-ids": [ 2, 5, 6, 9, 10 ] }
[ 2, 5, 6, 9, 10 ]
'''
Author: Allen Chen

This is an example of an entry point to CORE. Pay close attention to the import syntax - the imports are relative to this repo.
Don't try to run this with 'python3 main.py' from this directory. Instead, add your target to the Makefile under the root dir,
and call './run YOUR_TARGET_NAME' from root.
'''
from src.CORE.class_TradeBot import TradeBot
from src.util.logging import log_ok, log_info, log_error

bot = TradeBot()
log_info(f"Just initialized a bot named {bot.name}")
log_ok(f"Bot is given cash: {bot.cash}")
log_error("Nothing else to do ! :(")
normal
{ "blob_id": "18eed41cbc419ecbb215f77235be99f15f86ea9a", "index": 7468, "step-1": "<mask token>\n", "step-2": "<mask token>\nlog_info(f'Just initialized a bot named {bot.name}')\nlog_ok(f'Bot is given cash: {bot.cash}')\nlog_error('Nothing else to do ! :(')\n", "step-3": "<mask token>\nbot = TradeBot()\nlog_info(f'Just initialized a bot named {bot.name}')\nlog_ok(f'Bot is given cash: {bot.cash}')\nlog_error('Nothing else to do ! :(')\n", "step-4": "<mask token>\nfrom src.CORE.class_TradeBot import TradeBot\nfrom src.util.logging import log_ok, log_info, log_error\nbot = TradeBot()\nlog_info(f'Just initialized a bot named {bot.name}')\nlog_ok(f'Bot is given cash: {bot.cash}')\nlog_error('Nothing else to do ! :(')\n", "step-5": "'''\nAuthor: Allen Chen\n\nThis is an example of entry point to CORE. Pay close attention to the import syntax - they're relative to this repo.\nDon't try to run this by doing 'python3 main.py' under this directory. Try to add your Target in Makefile under the root dir,\nand call './run YOUR_TARGET_NAME' from root. \n'''\n\nfrom src.CORE.class_TradeBot import TradeBot\nfrom src.util.logging import log_ok, log_info, log_error\n\n\nbot = TradeBot()\nlog_info(f\"Just initialized a bot named {bot.name}\")\n\nlog_ok(f\"Bot is given cash: {bot.cash}\")\n\nlog_error(\"Nothing else to do ! :(\")", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Copyright (c) 2023 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os import path as osp from typing import Any, Callable, Dict, List, Optional, Tuple import torch from torch.distributed import barrier from torch.nn import Module from nncf.api.compression import CompressionAlgorithmController from nncf.common.compression import BaseCompressionAlgorithmController as BaseController from nncf.common.deprecation import warning_deprecated from nncf.common.logging import nncf_logger from nncf.common.utils.api_marker import api from nncf.common.utils.debug import set_debug_log_dir from nncf.config import NNCFConfig from nncf.config.extractors import extract_algorithm_names from nncf.config.telemetry_extractors import CompressionStartedFromConfig from nncf.telemetry import tracked_function from nncf.telemetry.events import NNCF_PT_CATEGORY from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS from nncf.torch.algo_selector import NoCompressionAlgorithmBuilder from nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder from nncf.torch.dynamic_graph.graph_tracer import create_input_infos from nncf.torch.nncf_network import NNCFNetwork # pylint:disable=too-many-branches from nncf.torch.utils import is_dist_avail_and_initialized from nncf.torch.utils import is_main_process from nncf.torch.utils import maybe_convert_legacy_names_in_compress_state from nncf.torch.utils import training_mode_switcher @api(canonical_alias="nncf.torch.create_compressed_model") @tracked_function( NNCF_PT_CATEGORY, [ CompressionStartedFromConfig(argname="config"), ], ) def create_compressed_model( model: Module, config: NNCFConfig, compression_state: Optional[Dict[str, Any]] = None, dummy_forward_fn: Callable[[Module], Any] = None, wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None, wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None, dump_graphs=True, ) -> Tuple[CompressionAlgorithmController, NNCFNetwork]: """ The main function used to produce a model ready for compression fine-tuning from an original PyTorch model and a configuration object. dummy_forward_fn :param model: The original model. Should have its parameters already loaded from a checkpoint or another source. :param config: A configuration object used to determine the exact compression modifications to be applied to the model :type config: nncf.NNCFConfig :param compression_state: representation of the entire compression state to unambiguously restore the compressed model. Includes builder and controller states. :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build the internal graph representation via tracing. Specifying this is useful when the original training pipeline has special formats of data loader output or has additional *forward* arguments other than input tensors. 
Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified. :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy forward call before passing the inputs to the underlying compressed model. This is required if the model's input tensors that are important for compression are not supplied as arguments to the model's forward call directly, but instead are located in a container (such as list), and the model receives the container as an argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified if dummy_forward_fn is specified. :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs :param dump_graphs: Whether to dump the internal graph representation of the original and compressed models in the .dot format into the log directory. :return: A controller for the compression algorithm (or algorithms, in which case the controller is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped as an object of NNCFNetwork. """ if isinstance(model, NNCFNetwork): raise RuntimeError( "The model object has already been compressed.\n" "NNCF for PyTorch modifies the model object in-place, and repeat calls to " "`nncf.torch.create_compressed_model` with the same model object passed as argument " "will lead to an incorrect attempt to compress the model twice.\n" "Make sure that the model object you are passing has not already been compressed (for " "instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\n" "If you are encountering this in a Jupyter notebook context - make sure that when " "re-running cells involving `nncf.torch.create_compressed_model` the original model object " "is also re-created (via constructor call)." 
) if config.get("target_device") == "VPU": warning_deprecated("VPU device is deprecated and will no longer be supported in the future.") set_debug_log_dir(config.get("log_dir", ".")) is_legacy_model_state_dict = ( compression_state is not None and BaseController.BUILDER_STATE not in compression_state and BaseController.CONTROLLER_STATE not in compression_state ) maybe_convert_legacy_names_in_compress_state(compression_state) should_init = compression_state is None nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn) if dump_graphs and is_main_process(): nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get("log_dir", "."), "original_graph.dot")) builder = create_compression_algorithm_builder(config, should_init) is_state_loadable = not is_legacy_model_state_dict and compression_state is not None if is_state_loadable: builder.load_state(compression_state[BaseController.BUILDER_STATE]) compressed_model = builder.apply_to(nncf_network) compression_ctrl = builder.build_controller(compressed_model) if is_state_loadable: compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE]) compressed_model.nncf.set_compression_controller(compression_ctrl) # Required to ensure that the model leaving create_compressed_model has correct compressed graph. # In particular, this is currently required for correct functioning of RNNs. compressed_model.nncf.rebuild_graph() try: if is_legacy_model_state_dict: from nncf.torch import load_state # pylint: disable=cyclic-import state_dict_to_load = compression_state.get("state_dict", compression_state) load_state(compressed_model, state_dict_to_load, is_resume=True) finally: if dump_graphs and is_main_process(): compressed_model_graph = compressed_model.nncf.get_graph() compressed_model_graph.visualize_graph(osp.join(config.get("log_dir", "."), "compressed_graph.dot")) synchronize_all_processes_in_distributed_mode() return compression_ctrl, compressed_model def create_nncf_network( model: torch.nn.Module, config: NNCFConfig, dummy_forward_fn: Callable[[Module], Any] = None, wrap_inputs_fn: Callable = None, wrap_outputs_fn: Callable = None, ) -> NNCFNetwork: """ The main function used to produce a model ready for adding compression from an original PyTorch model and a configuration object. :param model: The original model. Should have its parameters already loaded from a checkpoint or another source. :param config: A configuration object used to determine the exact compression modifications to be applied to the model :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build the internal graph representation via tracing. Specifying this is useful when the original training pipeline has special formats of data loader output or has additional *forward* arguments other than input tensors. Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified. :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy forward call before passing the inputs to the underlying compressed model. 
This is required if the model's input tensors that are important for compression are not supplied as arguments to the model's forward call directly, but instead are located in a container (such as list), and the model receives the container as an argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among the supplied model's args and kwargs that is important for compression (e.g. quantization) with an nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified if dummy_forward_fn is specified. :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call. :return: A model wrapped by NNCFNetwork, which is ready for adding compression.""" if dummy_forward_fn is not None and wrap_inputs_fn is None: raise ValueError( "A custom dummy forward function was specified, but the corresponding input wrapping function " "was not. In case a custom dummy forward function is specified for purposes of NNCF graph " "building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with " "the input wrapping done in dummy_forward_fn." ) # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode with training_mode_switcher(model, is_training=False): # Compress model that will be deployed for the inference on target device. No need to compress parts of the # model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with # weights. As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode. input_info_list = create_input_infos(config) scopes_without_shape_matching = config.get("scopes_without_shape_matching", []) ignored_scopes = config.get("ignored_scopes") target_scopes = config.get("target_scopes") nncf_network = NNCFNetwork( model, input_infos=input_info_list, dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes=ignored_scopes, target_scopes=target_scopes, scopes_without_shape_matching=scopes_without_shape_matching, ) nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph() synchronize_all_processes_in_distributed_mode() return nncf_network def synchronize_all_processes_in_distributed_mode(): if is_dist_avail_and_initialized(): try: barrier() # Exception can be raised during running barrier # if the backend not in the supported list https://pytorch.org/docs/stable/distributed.html except RuntimeError as err: nncf_logger.warning( "Training pipeline spawned an error while synchronizing distributed training processes:" ) nncf_logger.warning(err) nncf_logger.warning("Desynchronization of distributed processes may occur.") def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder: """ Create compression algorithm builders by a given list of algorithm names. 
:param config: A configuration object used to determine the exact compression modifications to be applied to the model :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False) the training parameters of the model during model building. :return: compression algorithm builder """ algo_names = extract_algorithm_names(config) return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init) def create_compression_algorithm_builder_from_algo_names( algo_names: List[str], config: NNCFConfig, should_init: bool ) -> PTCompressionAlgorithmBuilder: """ Create compression algorithm builders by a given list of algorithm names. :param algo_names: list of algorithm names :param config: A configuration object used to determine the exact compression modifications to be applied to the model :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False) the training parameters of the model during model building. :return: compression algorithm builder """ if not algo_names: algo_builder_classes = [NoCompressionAlgorithmBuilder] else: algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names] if len(algo_builder_classes) == 1: builder = next(iter(algo_builder_classes))(config, should_init=should_init) else: builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init) return builder
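# --- Illustrative usage sketch (not part of the NNCF sources above) ---------
# The docstrings above describe the config-driven entry point
# nncf.torch.create_compressed_model. The snippet below is a minimal sketch of
# how it is typically called, assuming NNCF and PyTorch are installed; the toy
# model, the input shape and the choice of the magnitude_sparsity algorithm are
# illustrative assumptions, not values taken from this file.
import torch.nn as nn

from nncf import NNCFConfig
from nncf.torch import create_compressed_model

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 16, 3))
config = NNCFConfig.from_dict(
    {
        # "input_info" drives the mock-tensor tracing mentioned in the
        # dummy_forward_fn docstring above; no dummy_forward_fn/wrap_inputs_fn
        # is needed when the model takes plain tensor inputs directly.
        "input_info": {"sample_size": [1, 3, 32, 32]},
        "compression": {"algorithm": "magnitude_sparsity"},
    }
)

# Returns the compression controller and the model wrapped as an NNCFNetwork.
compression_ctrl, compressed_model = create_compressed_model(model, config)

# The controller drives the compression schedule during fine-tuning and can
# export the state that is later passed back via the compression_state argument.
compression_ctrl.scheduler.epoch_step()
compression_state = compression_ctrl.get_compression_state()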
normal
{ "blob_id": "cd1ada2d7979fffc17f707ed113efde7aa134954", "index": 3036, "step-1": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. 
Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\n<mask token>\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be 
applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-2": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. 
Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\n<mask token>\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used 
to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-3": "<mask token>\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. 
The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n 
builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(model: torch.nn.Module, config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:\n Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. 
Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n 'A custom dummy forward function was specified, but the corresponding input wrapping function was not. In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'\n )\n with training_mode_switcher(model, is_training=False):\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\n 'scopes_without_shape_matching', [])\n ignored_scopes = config.get('ignored_scopes')\n target_scopes = config.get('target_scopes')\n nncf_network = NNCFNetwork(model, input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=\n wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes\n =ignored_scopes, target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching)\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n except RuntimeError as err:\n nncf_logger.warning(\n 'Training pipeline spawned an error while synchronizing distributed training processes:'\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\n 'Desynchronization of distributed processes may occur.')\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-4": "from os import 
path as osp\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\nimport torch\nfrom torch.distributed import barrier\nfrom torch.nn import Module\nfrom nncf.api.compression import CompressionAlgorithmController\nfrom nncf.common.compression import BaseCompressionAlgorithmController as BaseController\nfrom nncf.common.deprecation import warning_deprecated\nfrom nncf.common.logging import nncf_logger\nfrom nncf.common.utils.api_marker import api\nfrom nncf.common.utils.debug import set_debug_log_dir\nfrom nncf.config import NNCFConfig\nfrom nncf.config.extractors import extract_algorithm_names\nfrom nncf.config.telemetry_extractors import CompressionStartedFromConfig\nfrom nncf.telemetry import tracked_function\nfrom nncf.telemetry.events import NNCF_PT_CATEGORY\nfrom nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS\nfrom nncf.torch.algo_selector import NoCompressionAlgorithmBuilder\nfrom nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder\nfrom nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder\nfrom nncf.torch.dynamic_graph.graph_tracer import create_input_infos\nfrom nncf.torch.nncf_network import NNCFNetwork\nfrom nncf.torch.utils import is_dist_avail_and_initialized\nfrom nncf.torch.utils import is_main_process\nfrom nncf.torch.utils import maybe_convert_legacy_names_in_compress_state\nfrom nncf.torch.utils import training_mode_switcher\n\n\n@api(canonical_alias='nncf.torch.create_compressed_model')\n@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname=\n 'config')])\ndef create_compressed_model(model: Module, config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]]=None, dummy_forward_fn:\n Callable[[Module], Any]=None, wrap_inputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, wrap_outputs_fn: Callable[[Tuple, Dict],\n Tuple[Tuple, Dict]]=None, dump_graphs=True) ->Tuple[\n CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. 
The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"\"\"The model object has already been compressed.\nNNCF for PyTorch modifies the model object in-place, and repeat calls to `nncf.torch.create_compressed_model` with the same model object passed as argument will lead to an incorrect attempt to compress the model twice.\nMake sure that the model object you are passing has not already been compressed (for instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\nIf you are encountering this in a Jupyter notebook context - make sure that when re-running cells involving `nncf.torch.create_compressed_model` the original model object is also re-created (via constructor call).\"\"\"\n )\n if config.get('target_device') == 'VPU':\n warning_deprecated(\n 'VPU device is deprecated and will no longer be supported in the future.'\n )\n set_debug_log_dir(config.get('log_dir', '.'))\n is_legacy_model_state_dict = (compression_state is not None and \n BaseController.BUILDER_STATE not in compression_state and \n BaseController.CONTROLLER_STATE not in compression_state)\n maybe_convert_legacy_names_in_compress_state(compression_state)\n should_init = compression_state is None\n nncf_network = create_nncf_network(model, config, dummy_forward_fn,\n wrap_inputs_fn, wrap_outputs_fn)\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'original_graph.dot'))\n builder = create_compression_algorithm_builder(config, should_init)\n is_state_loadable = (not is_legacy_model_state_dict and \n compression_state is not None)\n if is_state_loadable:\n 
builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.\n CONTROLLER_STATE])\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n compressed_model.nncf.rebuild_graph()\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state\n state_dict_to_load = compression_state.get('state_dict',\n compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\n 'log_dir', '.'), 'compressed_graph.dot'))\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(model: torch.nn.Module, config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any]=None, wrap_inputs_fn:\n Callable=None, wrap_outputs_fn: Callable=None) ->NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. 
Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n 'A custom dummy forward function was specified, but the corresponding input wrapping function was not. In case a custom dummy forward function is specified for purposes of NNCF graph building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with the input wrapping done in dummy_forward_fn.'\n )\n with training_mode_switcher(model, is_training=False):\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\n 'scopes_without_shape_matching', [])\n ignored_scopes = config.get('ignored_scopes')\n target_scopes = config.get('target_scopes')\n nncf_network = NNCFNetwork(model, input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=\n wrap_inputs_fn, wrap_outputs_fn=wrap_outputs_fn, ignored_scopes\n =ignored_scopes, target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching)\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n except RuntimeError as err:\n nncf_logger.warning(\n 'Training pipeline spawned an error while synchronizing distributed training processes:'\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\n 'Desynchronization of distributed processes may occur.')\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names,\n config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(algo_names: List[\n str], config: NNCFConfig, should_init: bool\n ) ->PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for\n algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=\n should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config,\n should_init=should_init)\n return builder\n", "step-5": "# Copyright 
(c) 2023 Intel Corporation\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import path as osp\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport torch\nfrom torch.distributed import barrier\nfrom torch.nn import Module\n\nfrom nncf.api.compression import CompressionAlgorithmController\nfrom nncf.common.compression import BaseCompressionAlgorithmController as BaseController\nfrom nncf.common.deprecation import warning_deprecated\nfrom nncf.common.logging import nncf_logger\nfrom nncf.common.utils.api_marker import api\nfrom nncf.common.utils.debug import set_debug_log_dir\nfrom nncf.config import NNCFConfig\nfrom nncf.config.extractors import extract_algorithm_names\nfrom nncf.config.telemetry_extractors import CompressionStartedFromConfig\nfrom nncf.telemetry import tracked_function\nfrom nncf.telemetry.events import NNCF_PT_CATEGORY\nfrom nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS\nfrom nncf.torch.algo_selector import NoCompressionAlgorithmBuilder\nfrom nncf.torch.composite_compression import PTCompositeCompressionAlgorithmBuilder\nfrom nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder\nfrom nncf.torch.dynamic_graph.graph_tracer import create_input_infos\nfrom nncf.torch.nncf_network import NNCFNetwork\n\n# pylint:disable=too-many-branches\nfrom nncf.torch.utils import is_dist_avail_and_initialized\nfrom nncf.torch.utils import is_main_process\nfrom nncf.torch.utils import maybe_convert_legacy_names_in_compress_state\nfrom nncf.torch.utils import training_mode_switcher\n\n\n@api(canonical_alias=\"nncf.torch.create_compressed_model\")\n@tracked_function(\n NNCF_PT_CATEGORY,\n [\n CompressionStartedFromConfig(argname=\"config\"),\n ],\n)\ndef create_compressed_model(\n model: Module,\n config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]] = None,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n dump_graphs=True,\n) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:\n \"\"\"\n The main function used to produce a model ready for compression fine-tuning from an original PyTorch\n model and a configuration object.\n dummy_forward_fn\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :type config: nncf.NNCFConfig\n :param compression_state: representation of the entire compression state to unambiguously restore\n the compressed model. Includes builder and controller states.\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. 
Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input functions made with each compressed model input tensor in the underlying model's\n args/kwargs tuple, and these calls should be exactly the same as in the wrap_inputs_fn function code\n (see below); if dummy_forward_fn is specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. This is required if the model's\n input tensors that are important for compression are not supplied as arguments to the model's forward call\n directly, but instead are located in a container (such as list), and the model receives the container as an\n argument. wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the\n underlying model's forward call, and a dict of keyword arguments to the same. The function should wrap each\n tensor among nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs\n to be traced by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args\n and kwargs are the same as were supplied in input, but each tensor in the original input. Must be specified\n if dummy_forward_fn is specified.\n :param wrap_outputs_fn: same as `wrap_inputs_fn`, but applies to model outputs\n :param dump_graphs: Whether to dump the internal graph representation of the\n original and compressed models in the .dot format into the log directory.\n :return: A controller for the compression algorithm (or algorithms, in which case the controller\n is an instance of CompositeCompressionController) and the model ready for compression parameter training wrapped\n as an object of NNCFNetwork.\n \"\"\"\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"The model object has already been compressed.\\n\"\n \"NNCF for PyTorch modifies the model object in-place, and repeat calls to \"\n \"`nncf.torch.create_compressed_model` with the same model object passed as argument \"\n \"will lead to an incorrect attempt to compress the model twice.\\n\"\n \"Make sure that the model object you are passing has not already been compressed (for \"\n \"instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\\n\"\n \"If you are encountering this in a Jupyter notebook context - make sure that when \"\n \"re-running cells involving `nncf.torch.create_compressed_model` the original model object \"\n \"is also re-created (via constructor call).\"\n )\n\n if config.get(\"target_device\") == \"VPU\":\n warning_deprecated(\"VPU device is deprecated and will no longer be supported in the future.\")\n\n set_debug_log_dir(config.get(\"log_dir\", \".\"))\n\n is_legacy_model_state_dict = (\n compression_state is not None\n and BaseController.BUILDER_STATE not in compression_state\n and BaseController.CONTROLLER_STATE not in compression_state\n )\n maybe_convert_legacy_names_in_compress_state(compression_state)\n\n should_init = compression_state is None\n\n nncf_network = create_nncf_network(model, config, dummy_forward_fn, 
wrap_inputs_fn, wrap_outputs_fn)\n\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"original_graph.dot\"))\n builder = create_compression_algorithm_builder(config, should_init)\n\n is_state_loadable = not is_legacy_model_state_dict and compression_state is not None\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])\n\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n\n # Required to ensure that the model leaving create_compressed_model has correct compressed graph.\n # In particular, this is currently required for correct functioning of RNNs.\n compressed_model.nncf.rebuild_graph()\n\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state # pylint: disable=cyclic-import\n\n state_dict_to_load = compression_state.get(\"state_dict\", compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"compressed_graph.dot\"))\n\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model\n\n\ndef create_nncf_network(\n model: torch.nn.Module,\n config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable = None,\n wrap_outputs_fn: Callable = None,\n) -> NNCFNetwork:\n \"\"\"\n The main function used to produce a model ready for adding compression from an original PyTorch\n model and a configuration object.\n\n :param model: The original model. Should have its parameters already loaded from a checkpoint or another\n source.\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build\n the internal graph representation via tracing. Specifying this is useful when the original training pipeline\n has special formats of data loader output or has additional *forward* arguments other than input tensors.\n Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according\n to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to\n nncf.nncf_model_input\n functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these\n calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is\n specified, then wrap_inputs_fn also must be specified.\n :param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy\n forward call before passing the inputs to the underlying compressed model. 
This is required if the model's input\n tensors that are important for compression are not supplied as arguments to the model's forward call directly,\n but instead are located in a container (such as list), and the model receives the container as an argument.\n wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying\n model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among\n the supplied model's args and kwargs that is important for compression (e.g. quantization) with an\n nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced\n by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are\n the same as were supplied in input, but each tensor in the original input. Must be specified if\n dummy_forward_fn is specified.\n :param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.\n :return: A model wrapped by NNCFNetwork, which is ready for adding compression.\"\"\"\n\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n \"A custom dummy forward function was specified, but the corresponding input wrapping function \"\n \"was not. In case a custom dummy forward function is specified for purposes of NNCF graph \"\n \"building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with \"\n \"the input wrapping done in dummy_forward_fn.\"\n )\n\n # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode\n with training_mode_switcher(model, is_training=False):\n # Compress model that will be deployed for the inference on target device. No need to compress parts of the\n # model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with\n # weights. 
As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode.\n\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\"scopes_without_shape_matching\", [])\n ignored_scopes = config.get(\"ignored_scopes\")\n target_scopes = config.get(\"target_scopes\")\n\n nncf_network = NNCFNetwork(\n model,\n input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn,\n wrap_inputs_fn=wrap_inputs_fn,\n wrap_outputs_fn=wrap_outputs_fn,\n ignored_scopes=ignored_scopes,\n target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching,\n )\n\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n\n synchronize_all_processes_in_distributed_mode()\n return nncf_network\n\n\ndef synchronize_all_processes_in_distributed_mode():\n if is_dist_avail_and_initialized():\n try:\n barrier()\n # Exception can be raised during running barrier\n # if the backend not in the supported list https://pytorch.org/docs/stable/distributed.html\n except RuntimeError as err:\n nncf_logger.warning(\n \"Training pipeline spawned an error while synchronizing distributed training processes:\"\n )\n nncf_logger.warning(err)\n nncf_logger.warning(\"Desynchronization of distributed processes may occur.\")\n\n\ndef create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init)\n\n\ndef create_compression_algorithm_builder_from_algo_names(\n algo_names: List[str], config: NNCFConfig, should_init: bool\n) -> PTCompressionAlgorithmBuilder:\n \"\"\"\n Create compression algorithm builders by a given list of algorithm names.\n\n :param algo_names: list of algorithm names\n :param config: A configuration object used to determine the exact compression modifications to be applied\n to the model\n :param should_init: The flag indicates that the generated compression builder will initialize (True) or not (False)\n the training parameters of the model during model building.\n :return: compression algorithm builder\n \"\"\"\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)\n return builder\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
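The sample above ends at NNCF's create_compressed_model entry point. A rough usage sketch follows; the toy model and the config values are placeholders, and the exact config schema depends on the NNCF release, so treat it as an outline rather than the project's reference example.

import torch

from nncf import NNCFConfig
from nncf.torch import create_compressed_model

# Placeholder model; any torch.nn.Module with its parameters already loaded works.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3),
    torch.nn.ReLU(),
)

# input_info drives the mock-tensor graph trace described in the docstring above;
# the compression section selects the algorithm builder.
nncf_config = NNCFConfig.from_dict({
    "input_info": {"sample_size": [1, 3, 224, 224]},
    "compression": {"algorithm": "quantization"},
})

# Returns the compression controller and the model wrapped as an NNCFNetwork.
compression_ctrl, compressed_model = create_compressed_model(model, nncf_config)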
from utilities.MatplotlibUtility import * from utilities.PlotDefinitions.DrainSweep.OutputCurve import plot as importedOutputCurvePlot plotDescription = { 'name':'Chip Output Curves', 'plotCategory': 'chip', 'priority': 40, 'dataFileDependencies': ['DrainSweep.json'], 'plotDefaults': { 'figsize':(2,2.5), 'colorMap':'magma', }, } def plot(identifiers, chipIndexes, firstRunChipHistory, recentRunChipHistory, specificRunChipHistory, groupedChipHistory, mode_parameters=None): if(mode_parameters is None): mode_parameters = {} #mode_parameters['enableColorBar'] = False mode_parameters['colorsOverride'] = (plotDescription['plotDefaults']['colorMap'], 0.85, 0) if(mode_parameters['colorsOverride'] == []) else mode_parameters['colorsOverride'] mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults']['figsize'] if(mode_parameters['figureSizeOverride'] is None) else mode_parameters['figureSizeOverride'] return importedOutputCurvePlot(specificRunChipHistory, identifiers=identifiers, mode_parameters=mode_parameters)
normal
{ "blob_id": "49ae9e90402d784fc3af3b47e96842fbfe842104", "index": 9480, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory,\n recentRunChipHistory, specificRunChipHistory, groupedChipHistory,\n mode_parameters=None):\n if mode_parameters is None:\n mode_parameters = {}\n mode_parameters['colorsOverride'] = (plotDescription['plotDefaults'][\n 'colorMap'], 0.85, 0) if mode_parameters['colorsOverride'] == [\n ] else mode_parameters['colorsOverride']\n mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults'][\n 'figsize'] if mode_parameters['figureSizeOverride'\n ] is None else mode_parameters['figureSizeOverride']\n return importedOutputCurvePlot(specificRunChipHistory, identifiers=\n identifiers, mode_parameters=mode_parameters)\n", "step-3": "<mask token>\nplotDescription = {'name': 'Chip Output Curves', 'plotCategory': 'chip',\n 'priority': 40, 'dataFileDependencies': ['DrainSweep.json'],\n 'plotDefaults': {'figsize': (2, 2.5), 'colorMap': 'magma'}}\n\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory,\n recentRunChipHistory, specificRunChipHistory, groupedChipHistory,\n mode_parameters=None):\n if mode_parameters is None:\n mode_parameters = {}\n mode_parameters['colorsOverride'] = (plotDescription['plotDefaults'][\n 'colorMap'], 0.85, 0) if mode_parameters['colorsOverride'] == [\n ] else mode_parameters['colorsOverride']\n mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults'][\n 'figsize'] if mode_parameters['figureSizeOverride'\n ] is None else mode_parameters['figureSizeOverride']\n return importedOutputCurvePlot(specificRunChipHistory, identifiers=\n identifiers, mode_parameters=mode_parameters)\n", "step-4": "from utilities.MatplotlibUtility import *\nfrom utilities.PlotDefinitions.DrainSweep.OutputCurve import plot as importedOutputCurvePlot\nplotDescription = {'name': 'Chip Output Curves', 'plotCategory': 'chip',\n 'priority': 40, 'dataFileDependencies': ['DrainSweep.json'],\n 'plotDefaults': {'figsize': (2, 2.5), 'colorMap': 'magma'}}\n\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory,\n recentRunChipHistory, specificRunChipHistory, groupedChipHistory,\n mode_parameters=None):\n if mode_parameters is None:\n mode_parameters = {}\n mode_parameters['colorsOverride'] = (plotDescription['plotDefaults'][\n 'colorMap'], 0.85, 0) if mode_parameters['colorsOverride'] == [\n ] else mode_parameters['colorsOverride']\n mode_parameters['figureSizeOverride'] = plotDescription['plotDefaults'][\n 'figsize'] if mode_parameters['figureSizeOverride'\n ] is None else mode_parameters['figureSizeOverride']\n return importedOutputCurvePlot(specificRunChipHistory, identifiers=\n identifiers, mode_parameters=mode_parameters)\n", "step-5": "from utilities.MatplotlibUtility import *\nfrom utilities.PlotDefinitions.DrainSweep.OutputCurve import plot as importedOutputCurvePlot\n\n\nplotDescription = {\n\t'name':'Chip Output Curves',\n\t'plotCategory': 'chip',\n\t'priority': 40,\n\t'dataFileDependencies': ['DrainSweep.json'],\n\t'plotDefaults': {\n\t\t'figsize':(2,2.5),\n\t\t'colorMap':'magma',\n\t},\n}\n\ndef plot(identifiers, chipIndexes, firstRunChipHistory, recentRunChipHistory, specificRunChipHistory, groupedChipHistory, mode_parameters=None):\n\tif(mode_parameters is None):\n\t\tmode_parameters = {}\n\t#mode_parameters['enableColorBar'] = False\n\tmode_parameters['colorsOverride'] = (plotDescription['plotDefaults']['colorMap'], 0.85, 0) if(mode_parameters['colorsOverride'] == []) else 
mode_parameters['colorsOverride']\n\tmode_parameters['figureSizeOverride'] = plotDescription['plotDefaults']['figsize'] \t\t if(mode_parameters['figureSizeOverride'] is None) else mode_parameters['figureSizeOverride']\n\t\n\treturn importedOutputCurvePlot(specificRunChipHistory, identifiers=identifiers, mode_parameters=mode_parameters)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
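The plot-definition sample above fills missing mode_parameters entries from plotDescription['plotDefaults'] with inline conditionals. The same idea written as a small helper; apply_plot_defaults is a hypothetical name, not part of the original module, and it treats an empty or missing value as "use the default":

def apply_plot_defaults(mode_parameters, plot_defaults):
    # Hypothetical helper illustrating the defaulting idiom used in the sample above.
    if mode_parameters is None:
        mode_parameters = {}
    if not mode_parameters.get('colorsOverride'):
        mode_parameters['colorsOverride'] = (plot_defaults['colorMap'], 0.85, 0)
    if mode_parameters.get('figureSizeOverride') is None:
        mode_parameters['figureSizeOverride'] = plot_defaults['figsize']
    return mode_parameters

params = apply_plot_defaults({}, {'colorMap': 'magma', 'figsize': (2, 2.5)})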
#!/usr/bin/env python import socket import datetime as dt import matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib.animation import FuncAnimation from matplotlib import style import pickle # Create figure for plotting time_list = [] gain_list = [] HOST = '127.0.0.1' # Standard loopback interface address (localhost) PORT = 65432 # Port to listen on (non-privileged ports are > 1023) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((HOST, PORT)) s.listen(5) conn, addr = s.accept() fig, ax1 = plt.subplots() ax1.set_ylim(-.1, 1.1) ax1.set_xlim(0, 2) def recieve_data(): while True: data = conn.recv(1024) if not data: break conn.sendall(data) msg = pickle.loads(data) time = float(msg[0]) gain = float(msg[1]) yield time , gain conn.close() def animate(i): xs = [] ys = [] for line in recieve_data(): if len(xs) < 50: x, y = line #print(x,y) xs.append(float(x)) ys.append(float(y)) else:break print(xs,ys) ax1.clear() ax1.plot(xs, ys) ani = animation.FuncAnimation(fig, animate, interval=10) plt.show()
normal
{ "blob_id": "a4d5064decdc9963dae1712c7c6918b3e5902bf2", "index": 9825, "step-1": "<mask token>\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\n<mask token>\n", "step-2": "<mask token>\ns.bind((HOST, PORT))\ns.listen(5)\n<mask token>\nax1.set_ylim(-0.1, 1.1)\nax1.set_xlim(0, 2)\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\n<mask token>\nplt.show()\n", "step-3": "<mask token>\ntime_list = []\ngain_list = []\nHOST = '127.0.0.1'\nPORT = 65432\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(5)\nconn, addr = s.accept()\nfig, ax1 = plt.subplots()\nax1.set_ylim(-0.1, 1.1)\nax1.set_xlim(0, 2)\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\nani = animation.FuncAnimation(fig, animate, interval=10)\nplt.show()\n", "step-4": "import socket\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib import style\nimport pickle\ntime_list = []\ngain_list = []\nHOST = '127.0.0.1'\nPORT = 65432\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(5)\nconn, addr = s.accept()\nfig, ax1 = plt.subplots()\nax1.set_ylim(-0.1, 1.1)\nax1.set_xlim(0, 2)\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\nani = animation.FuncAnimation(fig, animate, interval=10)\nplt.show()\n", "step-5": "#!/usr/bin/env python\n\nimport socket\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib import style\nimport pickle\n# Create figure for plotting\n\ntime_list = []\ngain_list = []\n\nHOST = '127.0.0.1' # Standard loopback interface address (localhost)\nPORT = 65432 # Port to listen on (non-privileged ports are > 1023)\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(5)\nconn, addr = s.accept()\n\n\nfig, ax1 = plt.subplots()\nax1.set_ylim(-.1, 1.1)\nax1.set_xlim(0, 2)\n\ndef recieve_data():\n\twhile 
True:\n\t\t data = conn.recv(1024)\n\t\t if not data:\n\t\t\t break\n\t\t conn.sendall(data)\n\t\t msg = pickle.loads(data)\n\t\t time = float(msg[0])\n\t\t gain = float(msg[1])\n\t\t yield time , gain\n\tconn.close()\n\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n #print(x,y)\n xs.append(float(x))\n ys.append(float(y))\n else:break\n print(xs,ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\nani = animation.FuncAnimation(fig, animate, interval=10)\nplt.show()\n\n\n\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
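The sample above is the receiving half of a plot-over-socket setup: it listens on 127.0.0.1:65432, unpickles (time, gain) pairs, echoes each message back, and animates the result with FuncAnimation. A minimal companion client, not part of the sample, that produces such pairs; like the server it assumes each pickled message fits in a single 1024-byte read:

import pickle
import socket
import time

HOST, PORT = '127.0.0.1', 65432  # must match the listening server above

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    t0 = time.time()
    for i in range(500):
        msg = (time.time() - t0, (i % 100) / 100.0)  # (time, gain) pair
        s.sendall(pickle.dumps(msg))
        s.recv(1024)       # the server echoes every message back
        time.sleep(0.05)   # crude pacing so consecutive messages are not coalesced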
from field import print_field from math_utilite import sign, col def start_parameter_2(par): global cell_king, castling_control, trans, take_on_aisle cell_king = par[0] castling_control = par[1] trans = par[2] take_on_aisle = par[3] def det_cell_king(field): global cell_king cell_king = {sign(fig):(x, y) for x, row in enumerate(field) for y, fig in enumerate(row) if abs(fig)==6} return cell_king def det_castling_control(field): global castling_control for color in (1, -1): hor = 0 if color == 1 else 7 dk = 0 if field[hor][4] == 6*color else 1 dlr = 0 if field[hor][0] == 2*color else 1 drr = 0 if field[hor][-1] == 2*color else 1 castling_control[color] = (dk, dlr, drr) return castling_control def king_and_castling(field, color, old, new, d): global cell_king, castling_control cell_king[color] = (new[0], new[1]) storlg=new[1]-old[1] if abs(storlg) == 2: storlg = sign(storlg) rp = 7 if storlg*d == 1 else 0 field[new[0]][new[1]-storlg] = 2*color if d == 1 else 0 field[new[0]][rp] = 0 if d == 1 else 2*color cont = castling_control[color] castling_control[color] = (cont[0], cont[1]-storlg+d, cont[2]+storlg+d) castling_control[color] = (castling_control[color][0]+d, castling_control[color][1], castling_control[color][2]) def rook(field, color, old, new, d): global castling_control hor = 0 if color == 1 else 7 cont = castling_control[color] x, y = old if d == 1 else new if x == hor and y % 7 == 0: castling_control[color] = (cont[0], cont[1] + d*(-sign(y-3)+1), cont[2] + d*(sign(y-3)+1)) def trans_pawn(color, old): return True if (old[0] * color) % 7 == 6 else False def take_on_aisle_pawn(color, old, new): global take_on_aisle if abs(new[0]-old[0]) == 2: take_on_aisle = (color, new[1]) else: take_on_aisle = ('l', 8) return take_on_aisle def take_on_aisle_move(field, color, old, new, fig, d, main): global take_on_aisle if main == 1: take_on_aisle_pawn(color, old, new) if abs(old[1]-new[1]) == 1: if field[new[0]][new[1]] == 0 and d == 1: field[old[0]][new[1]] = 0 if fig == 0 and d == -1: field[new[0]][old[1]] = -color def move(field, old, new, fig=0, d=1, trans_fig=1, main=0): global trans, take_on_aisle color = sign(field[old[0]][old[1]]) figure = abs(field[old[0]][old[1]]) if figure == 2: rook(field, color, old, new, d) if figure == 6: king_and_castling(field, color, old, new, d) if trans == True: figure = 1 trans = False if figure == 1: trans = trans_pawn(color, old) if d == 1 else False if trans == True: figure = trans_fig take_on_aisle_move(field, color, old, new, fig, d, main) if main == 1: trans = False field[new[0]][new[1]] = color*figure field[old[0]][old[1]] = fig
normal
{ "blob_id": "90c9456bf22745d99fa76dbc752beae1a3835682", "index": 7672, "step-1": "<mask token>\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\n<mask token>\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\ndef det_castling_control(field):\n global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6 * color else 1\n dlr = 0 if field[hor][0] == 2 * color else 1\n drr = 0 if field[hor][-1] == 2 * color else 1\n castling_control[color] = dk, dlr, drr\n return castling_control\n\n\n<mask token>\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\ndef take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1] - new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False\n if trans == True:\n figure = trans_fig\n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color * figure\n field[old[0]][old[1]] = fig\n", "step-3": "<mask token>\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\ndef det_castling_control(field):\n 
global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6 * color else 1\n dlr = 0 if field[hor][0] == 2 * color else 1\n drr = 0 if field[hor][-1] == 2 * color else 1\n castling_control[color] = dk, dlr, drr\n return castling_control\n\n\ndef king_and_castling(field, color, old, new, d):\n global cell_king, castling_control\n cell_king[color] = new[0], new[1]\n storlg = new[1] - old[1]\n if abs(storlg) == 2:\n storlg = sign(storlg)\n rp = 7 if storlg * d == 1 else 0\n field[new[0]][new[1] - storlg] = 2 * color if d == 1 else 0\n field[new[0]][rp] = 0 if d == 1 else 2 * color\n cont = castling_control[color]\n castling_control[color] = cont[0], cont[1] - storlg + d, cont[2\n ] + storlg + d\n castling_control[color] = castling_control[color][0] + d, castling_control[\n color][1], castling_control[color][2]\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\ndef take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1] - new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False\n if trans == True:\n figure = trans_fig\n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color * figure\n field[old[0]][old[1]] = fig\n", "step-4": "from field import print_field\nfrom math_utilite import sign, col\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\ndef det_castling_control(field):\n global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6 * color else 1\n dlr = 0 if field[hor][0] == 2 * color else 1\n drr = 0 if field[hor][-1] == 2 * color else 1\n castling_control[color] = dk, dlr, drr\n return castling_control\n\n\ndef king_and_castling(field, color, old, new, d):\n global cell_king, castling_control\n cell_king[color] = new[0], new[1]\n storlg = new[1] - old[1]\n if abs(storlg) == 2:\n storlg = sign(storlg)\n rp = 7 if storlg * d == 1 else 0\n field[new[0]][new[1] - storlg] = 2 * color if d == 1 else 0\n field[new[0]][rp] = 0 if d == 1 else 2 * color\n cont = castling_control[color]\n castling_control[color] = cont[0], 
cont[1] - storlg + d, cont[2\n ] + storlg + d\n castling_control[color] = castling_control[color][0] + d, castling_control[\n color][1], castling_control[color][2]\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\ndef take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1] - new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False\n if trans == True:\n figure = trans_fig\n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color * figure\n field[old[0]][old[1]] = fig\n", "step-5": "from field import print_field\nfrom math_utilite import sign, col\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n \ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig):(x, y) for x, row in enumerate(field) for y, fig in enumerate(row) if abs(fig)==6}\n return cell_king\n\ndef det_castling_control(field):\n global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6*color else 1\n dlr = 0 if field[hor][0] == 2*color else 1\n drr = 0 if field[hor][-1] == 2*color else 1\n castling_control[color] = (dk, dlr, drr)\n return castling_control\n \n \ndef king_and_castling(field, color, old, new, d):\n global cell_king, castling_control\n cell_king[color] = (new[0], new[1])\n storlg=new[1]-old[1]\n if abs(storlg) == 2:\n storlg = sign(storlg)\n rp = 7 if storlg*d == 1 else 0\n field[new[0]][new[1]-storlg] = 2*color if d == 1 else 0\n field[new[0]][rp] = 0 if d == 1 else 2*color\n cont = castling_control[color] \n castling_control[color] = (cont[0], cont[1]-storlg+d, cont[2]+storlg+d)\n castling_control[color] = (castling_control[color][0]+d, castling_control[color][1], castling_control[color][2])\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = (cont[0], cont[1] + d*(-sign(y-3)+1), cont[2] + d*(sign(y-3)+1))\n\ndef trans_pawn(color, old):\n return True if (old[0] * color) % 7 == 6 else False\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0]-old[0]) == 2:\n take_on_aisle = (color, new[1])\n else:\n take_on_aisle = ('l', 8)\n return take_on_aisle\n\ndef 
take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1]-new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False \n if trans == True: \n figure = trans_fig \n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color*figure\n field[old[0]][old[1]] = fig\n\n\n\n", "step-ids": [ 4, 8, 9, 10, 11 ] }
[ 4, 8, 9, 10, 11 ]
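The chess sample above encodes the board as an 8x8 list of signed ints: the sign is the colour and abs() is the piece (1 pawn, 2 rook, 6 king, and so on), with module-level globals for king positions, castling rights, promotion and en-passant state. A small usage sketch assuming those functions are in scope; the board layout below is a reduced example, not a full starting position:

empty_row = [0] * 8
field = [
    [2, 0, 0, 0, 6, 0, 0, 2],     # white rooks and king on rank 0
    [1] * 8,                      # white pawns
    empty_row[:], empty_row[:], empty_row[:], empty_row[:],
    [-1] * 8,                     # black pawns
    [-2, 0, 0, 0, -6, 0, 0, -2],  # black rooks and king on rank 7
]

# Seed the module globals before calling move(); the values mirror the module's own
# "nothing pending" sentinels (empty dicts, no promotion, no en-passant square).
start_parameter_2(({}, {}, False, ('l', 8)))
det_cell_king(field)
det_castling_control(field)

move(field, (1, 4), (3, 4), main=1)  # push the e-pawn two squares forward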
import pandas as pd import os import openpyxl from collections import defaultdict,deque # 調節用パラメータ filename = 'kaito7.xlsx' # 入力ファイル名 Output = 'output7.xlsx' # 出力ディレクトリ wb = openpyxl.load_workbook(filename) sheets = wb.sheetnames days = [] names = [] dict = defaultdict(dict) for sheet in sheets: sh = wb[sheet] i = 3 while True: tmp = sh.cell(row=1,column=i).value if tmp: days.append(tmp) else: break i += 1 print(days) days.pop() i = 2 while True: tmp = sh.cell(row=i,column=2).value if tmp: names.append(tmp) else: break i += 1 W = len(days) H = len(names) for y in range(2,2+H): for x in range(3,3+W): tmp = sh.cell(row=y,column=x).value dict[names[y-2]][days[x-3]] = tmp times = dict['しまむら']['7/10(水)'].split(', ') ans = [[' ', ' '] + names] for d in days: for t in times: tmpl = [d,t] for n in names: if dict[n][d] and t in dict[n][d]: tmpl.append(1) else: tmpl.append(0) ans.append(tmpl) for a in ans: print(a) wb = openpyxl.load_workbook(Output) sheets = wb.sheetnames sheet = wb[sheets[0]] def write_list_2d(sheet, l_2d, start_row, start_col): for y, row in enumerate(l_2d): for x, cell in enumerate(row): #print(l_2d[y][x]) sheet.cell(row=start_row + y,column=start_col + x,value=l_2d[y][x]) #print(sheet.cell(row=start_row + y,column=start_col + x).value) write_list_2d(sheet,ans,1,1) wb.save(Output) print(sheets[0])
normal
{ "blob_id": "37d5696c402737bfafe21b20b90a49e2753fdc4f", "index": 7287, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\n<mask token>\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\n<mask token>\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n", "step-3": "<mask token>\nfilename = 'kaito7.xlsx'\nOutput = 'output7.xlsx'\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\ntimes = dict['しまむら']['7/10(水)'].split(', ')\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n", "step-4": "import pandas as pd\nimport os\nimport openpyxl\nfrom collections import defaultdict, deque\nfilename = 'kaito7.xlsx'\nOutput = 'output7.xlsx'\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\ntimes = dict['しまむら']['7/10(水)'].split(', ')\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in 
enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n", "step-5": "import pandas as pd\nimport os\nimport openpyxl\nfrom collections import defaultdict,deque\n\n# 調節用パラメータ\nfilename = 'kaito7.xlsx' # 入力ファイル名\nOutput = 'output7.xlsx' # 出力ディレクトリ\n\n\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\n\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1,column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n\n i = 2\n while True:\n tmp = sh.cell(row=i,column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n\n W = len(days)\n H = len(names)\n for y in range(2,2+H):\n for x in range(3,3+W):\n tmp = sh.cell(row=y,column=x).value\n dict[names[y-2]][days[x-3]] = tmp\n\ntimes = dict['しまむら']['7/10(水)'].split(', ')\n\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d,t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\n\nfor a in ans:\n print(a)\n\n\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n #print(l_2d[y][x])\n sheet.cell(row=start_row + y,column=start_col + x,value=l_2d[y][x])\n #print(sheet.cell(row=start_row + y,column=start_col + x).value)\n\nwrite_list_2d(sheet,ans,1,1)\n\nwb.save(Output)\n\nprint(sheets[0])\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
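The sample above builds an availability matrix from one workbook and writes it into another with a nested-loop helper. A self-contained sketch of that write_list_2d pattern against a fresh workbook; the table contents and the output file name are made up for illustration:

import openpyxl

def write_list_2d(sheet, l_2d, start_row, start_col):
    # Write a 2-D Python list into the sheet, top-left corner at (start_row, start_col).
    for y, row in enumerate(l_2d):
        for x, value in enumerate(row):
            sheet.cell(row=start_row + y, column=start_col + x, value=value)

wb = openpyxl.Workbook()
ws = wb.active
table = [
    [' ', ' ', 'alice', 'bob'],
    ['7/10', '10:00', 1, 0],
    ['7/10', '13:00', 1, 1],
]
write_list_2d(ws, table, 1, 1)
wb.save('availability_demo.xlsx')  # hypothetical output path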
""" Iterations over :term:`hosts<host>`, :term:`roles<role>`, :term:`components<component>` and config files. """ from contextlib import contextmanager from fabric.api import env, settings, abort from os.path import join from pkg_resources import iter_entry_points from warnings import warn from fabric.network import ssh_config from confab.options import options from confab.validate import assert_exists from confab.loaders import FileSystemEnvironmentLoader from confab.data import DataLoader from confab.conffiles import ConfFiles @contextmanager def this_hostname(hostname): """ Context manager that uses the current SSH confg to switch Fabric to a specific hostname. Updates hostname and port. """ host_config = ssh_config(hostname) host_string = hostname port = host_config.get("port", env.default_port) with settings(host_string=host_string, port=port): yield def _get_environmentdef(): """ Retreive the EnvironmentDefinition from the fabric env. """ if 'environmentdef' not in env: abort("Environment needs to be configured") environmentdef = env.environmentdef # If we're running via `fab`, we should restrict the environment # to the current host. if env.host_string: environmentdef = environmentdef.with_hosts(env.host_string) return environmentdef def iter_hosts(): """ Iterate over all hosts in the configured environment. """ environmentdef = _get_environmentdef() for host in environmentdef.hosts(): # fabric needs the host if we're calling from main() with this_hostname(host.host): yield host def iter_hosts_and_roles(): """ Iterate over all hosts and roles in the configured environment. """ environmentdef = _get_environmentdef() for host_and_role in environmentdef.all(): # fabric needs the host if we're calling from main() with this_hostname(host_and_role.host): yield host_and_role def iter_conffiles(directory=None): """ Generate :class:`~confab.conffiles.ConfFiles` objects for each ``host_and_role`` in an :term:`environment`. Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and :class:`~confab.data.DataLoader`. :param directory: Path to templates and data directories. """ for host_and_role in iter_hosts_and_roles(): yield make_conffiles(host_and_role, directory) def make_conffiles(host_and_role, directory=None): """ Create a :class:`~confab.conffiles.ConfFiles` object for a ``host_and_role`` in an :term:`environment`. Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and :class:`~confab.data.DataLoader`. :param directory: Path to templates and data directories. """ directories = [directory or options.get_base_dir()] directories.extend(iter_extension_paths()) # Construct directories templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()), directories) assert_exists(*templates_dirs) data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories) assert_exists(*data_dirs) return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*templates_dirs), DataLoader(data_dirs)) def iter_extension_paths(): """ Get templates paths from confab extension entry points. entry points should point to a callable that returns the base path to the data and templates directories. """ for entry_point in iter_entry_points(group="confab.extensions"): try: path_func = entry_point.load() yield path_func() except ImportError as e: warn(str(e))
normal
{ "blob_id": "cc019c732003ed72db80a7893096a0bef0f12e47", "index": 4168, "step-1": "<mask token>\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\n<mask token>\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n", "step-2": "<mask token>\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield 
make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n", "step-3": "<mask token>\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n host_string = hostname\n port = host_config.get('port', env.default_port)\n with settings(host_string=host_string, port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n 
assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n", "step-4": "<mask token>\nfrom contextlib import contextmanager\nfrom fabric.api import env, settings, abort\nfrom os.path import join\nfrom pkg_resources import iter_entry_points\nfrom warnings import warn\nfrom fabric.network import ssh_config\nfrom confab.options import options\nfrom confab.validate import assert_exists\nfrom confab.loaders import FileSystemEnvironmentLoader\nfrom confab.data import DataLoader\nfrom confab.conffiles import ConfFiles\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n host_string = hostname\n port = host_config.get('port', env.default_port)\n with settings(host_string=host_string, port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that 
returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n", "step-5": "\"\"\"\nIterations over :term:`hosts<host>`, :term:`roles<role>`,\n:term:`components<component>` and config files.\n\"\"\"\nfrom contextlib import contextmanager\nfrom fabric.api import env, settings, abort\nfrom os.path import join\nfrom pkg_resources import iter_entry_points\nfrom warnings import warn\n\nfrom fabric.network import ssh_config\n\nfrom confab.options import options\nfrom confab.validate import assert_exists\nfrom confab.loaders import FileSystemEnvironmentLoader\nfrom confab.data import DataLoader\nfrom confab.conffiles import ConfFiles\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n\n host_string = hostname\n port = host_config.get(\"port\", env.default_port)\n\n with settings(host_string=host_string,\n port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort(\"Environment needs to be configured\")\n\n environmentdef = env.environmentdef\n\n # If we're running via `fab`, we should restrict the environment\n # to the current host.\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n\n for host_and_role in environmentdef.all():\n # fabric needs the host if we're calling from main()\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n\n # Construct directories\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()), directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n\n return ConfFiles(host_and_role,\n FileSystemEnvironmentLoader(*templates_dirs),\n DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths 
from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
#!/usr/bin/python3
# -*- coding:utf-8 -*-

import re

def main():
    s = input().strip()
    s = s.replace('BC', 'X')
    ans = 0
    for ax in re.split(r'[BC]+', s):
        inds = []
        for i in range(len(ax)):
            if ax[i] == 'A':
                inds.append(i)
        ans += sum([len(ax) - 1 - ind for ind in inds]) - sum(range(len(inds)))
    print(ans)

if __name__=='__main__':
    main()
normal
{ "blob_id": "4100415b0df52e8e14b00dd66c7c53cd46c0ea6e", "index": 2378, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split('[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(\n inds)))\n print(ans)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split('[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(\n inds)))\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import re\n\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split('[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([(len(ax) - 1 - ind) for ind in inds]) - sum(range(len(\n inds)))\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport re\n\ndef main():\n s = input().strip()\n s = s.replace('BC', 'X')\n ans = 0\n for ax in re.split(r'[BC]+', s):\n inds = []\n for i in range(len(ax)):\n if ax[i] == 'A':\n inds.append(i)\n ans += sum([len(ax) - 1 - ind for ind in inds]) - sum(range(len(inds)))\n print(ans)\n\nif __name__=='__main__':\n main()\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
INITIAL_B = 0.15062677711161448
B_FACTOR = 5.0

INITIAL_GE = 0.22581915788215678
GE_BOUNDS = [1.0 / 10.0, 1.0 / 4.0]

FIXED_P = 0.9401234488501574

INITIAL_GU = 0.2145066414796447
GU_BOUNDS = [1.0 / 15.0, 1.0 / 2.0]

INITIAL_GI = 0.19235137989123863
GI_BOUNDS = [1.0 / 15.0, 1.0 / 5.0]

INITIAL_GH = 0.044937075878220795
GH_BOUNDS = [1.0 / 20.0, 1.0 / 5.0]

INITIAL_MU = 0.002840331041978459
MU_BOUNDS = [0.0, 0.1]

INITIAL_PARAMETERS = [
    INITIAL_B,
    INITIAL_GE,
    FIXED_P,
    INITIAL_GU,
    INITIAL_GI,
    INITIAL_GH,
    None,  # rH
    INITIAL_MU,
]

E_FACTOR = 5.0
U_FACTOR = 5.0
I_FACTOR = 5.0
normal
{ "blob_id": "47cf3045f2fa0f69759e09b1599e4afe953c06d8", "index": 5138, "step-1": "<mask token>\n", "step-2": "INITIAL_B = 0.15062677711161448\nB_FACTOR = 5.0\nINITIAL_GE = 0.22581915788215678\nGE_BOUNDS = [1.0 / 10.0, 1.0 / 4.0]\nFIXED_P = 0.9401234488501574\nINITIAL_GU = 0.2145066414796447\nGU_BOUNDS = [1.0 / 15.0, 1.0 / 2.0]\nINITIAL_GI = 0.19235137989123863\nGI_BOUNDS = [1.0 / 15.0, 1.0 / 5.0]\nINITIAL_GH = 0.044937075878220795\nGH_BOUNDS = [1.0 / 20.0, 1.0 / 5.0]\nINITIAL_MU = 0.002840331041978459\nMU_BOUNDS = [0.0, 0.1]\nINITIAL_PARAMETERS = [INITIAL_B, INITIAL_GE, FIXED_P, INITIAL_GU,\n INITIAL_GI, INITIAL_GH, None, INITIAL_MU]\nE_FACTOR = 5.0\nU_FACTOR = 5.0\nI_FACTOR = 5.0\n", "step-3": "INITIAL_B = 0.15062677711161448\nB_FACTOR = 5.0\n\nINITIAL_GE = 0.22581915788215678\nGE_BOUNDS = [1.0 / 10.0, 1.0 / 4.0]\n\nFIXED_P = 0.9401234488501574\n\nINITIAL_GU = 0.2145066414796447\nGU_BOUNDS = [1.0 / 15.0, 1.0 / 2.0]\n\nINITIAL_GI = 0.19235137989123863\nGI_BOUNDS = [1.0 / 15.0, 1.0 / 5.0]\n\nINITIAL_GH = 0.044937075878220795\nGH_BOUNDS = [1.0 / 20.0, 1.0 / 5.0]\n\nINITIAL_MU = 0.002840331041978459\nMU_BOUNDS = [0.0, 0.1]\n\nINITIAL_PARAMETERS = [\n INITIAL_B,\n INITIAL_GE,\n FIXED_P,\n INITIAL_GU,\n INITIAL_GI,\n INITIAL_GH,\n None, # rH\n INITIAL_MU,\n]\n\nE_FACTOR = 5.0\nU_FACTOR = 5.0\nI_FACTOR = 5.0\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# -*- coding: utf-8 -*- #
import time
from openerp.osv import osv, fields
import logging
import openerp.addons.decimal_precision as dp

logger = logging.getLogger(__name__)

class ebiz_supplier_account_create(osv.osv_memory):
    _name = 'ebiz.supplier.account.create.wizard'
    _description = "Ebiz Supplier Account"

    def create_supplier_action(self, cr, uid, ids, context=None):
        active_ids = context.get('active_ids', False)
        supplier_ids = self.pool['ebiz.supplier.account.line'].create_ebiz_supplier_account_line(cr, uid, active_ids, context=context)
        return {
            'view_type': 'form',
            'view_mode': 'tree',
            'res_model': 'ebiz.supplier.account.line',
            'type': 'ir.actions.act_window',
            'domain': [('id', 'in', supplier_ids or [0])],
        }
ebiz_supplier_account_create()
normal
{ "blob_id": "309f8016dfebcc3595291b127edb4634f72298ec", "index": 4387, "step-1": "<mask token>\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n <mask token>\n <mask token>\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = 'Ebiz Supplier Account'\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\nebiz_supplier_account_create()\n", "step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = 'Ebiz Supplier Account'\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\nebiz_supplier_account_create()\n", "step-4": "import time\nfrom openerp.osv import osv, fields\nimport logging\nimport openerp.addons.decimal_precision as dp\nlogger = logging.getLogger(__name__)\n\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = 'Ebiz Supplier Account'\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids', False)\n supplier_ids = self.pool['ebiz.supplier.account.line'\n ].create_ebiz_supplier_account_line(cr, uid, active_ids,\n context=context)\n return {'view_type': 'form', 'view_mode': 'tree', 'res_model':\n 'ebiz.supplier.account.line', 'type': 'ir.actions.act_window',\n 'domain': [('id', 'in', supplier_ids or [0])]}\n\n\nebiz_supplier_account_create()\n", "step-5": "# -*- coding: utf-8 -*- #\nimport time\nfrom openerp.osv import osv, fields\nimport logging\nimport openerp.addons.decimal_precision as dp\n\nlogger = logging.getLogger(__name__)\n\nclass ebiz_supplier_account_create(osv.osv_memory):\n _name = 'ebiz.supplier.account.create.wizard'\n _description = \"Ebiz Supplier Account\"\n\n def create_supplier_action(self, cr, uid, ids, context=None):\n active_ids = context.get('active_ids',False)\n supplier_ids = self.pool['ebiz.supplier.account.line'].create_ebiz_supplier_account_line(cr, uid, active_ids, context=context)\n return {\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'ebiz.supplier.account.line',\n 'type': 'ir.actions.act_window',\n 'domain':[('id','in',supplier_ids or [0])],\n }\nebiz_supplier_account_create()\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
#!/usr/bin/env python

import fileinput

#open the file with the matched DNA short reads
#create a file with the modified version
f1 = open('CompleteDNAsequence.txt', 'r')
f2 = open('CompleteDNAsequence.txt.tmp', 'w')
for line in f1:
    f2.write(line.replace('_', '\n')) #replaces _ with a newline
f1.close()
f2.close()

#opens modified file, reads first line and saves it to new file
lines = open('CompleteDNAsequence.txt.tmp').readlines()
open('ANSWER.txt', 'w').writelines(lines[:1])
normal
{ "blob_id": "d02ef5fc27cde353e90dda4090905b89b5be5c49", "index": 2897, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in f1:\n f2.write(line.replace('_', '\\n'))\nf1.close()\nf2.close()\n<mask token>\nopen('ANSWER.txt', 'w').writelines(lines[:+1])\n", "step-3": "<mask token>\nf1 = open('CompleteDNAsequence.txt', 'r')\nf2 = open('CompleteDNAsequence.txt.tmp', 'w')\nfor line in f1:\n f2.write(line.replace('_', '\\n'))\nf1.close()\nf2.close()\nlines = open('CompleteDNAsequence.txt.tmp').readlines()\nopen('ANSWER.txt', 'w').writelines(lines[:+1])\n", "step-4": "import fileinput\nf1 = open('CompleteDNAsequence.txt', 'r')\nf2 = open('CompleteDNAsequence.txt.tmp', 'w')\nfor line in f1:\n f2.write(line.replace('_', '\\n'))\nf1.close()\nf2.close()\nlines = open('CompleteDNAsequence.txt.tmp').readlines()\nopen('ANSWER.txt', 'w').writelines(lines[:+1])\n", "step-5": "#!/usr/bin/env python\n\nimport fileinput\n\n#open the file with the matched DNA short reads\n#create a file with the modified version\nf1 = open('CompleteDNAsequence.txt', 'r')\nf2 = open('CompleteDNAsequence.txt.tmp', 'w')\nfor line in f1:\n f2.write(line.replace('_', '\\n')) #replaces _ with tab\nf1.close()\nf2.close()\n\n#opens modified file, reads first line and saves it to new file\nlines = open('CompleteDNAsequence.txt.tmp').readlines()\nopen('ANSWER.txt', 'w').writelines(lines[:+1])\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os
from flask import Flask, jsonify, request, abort, make_response
from flask_sqlalchemy import SQLAlchemy
from .models import User
from .config import app_config

app = Flask(__name__)
app.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])
db = SQLAlchemy(app)


@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
    try:
        user = User.query.filter_by(id=user_id).first()
        return jsonify({'user': user.serialize})
    except:
        abort(404)


@app.route('/api/v1/users', methods=['POST'])
def create_user():
    if not request.json or 'firstName' not in request.json or 'lastName' not in request.json:
        abort(400)
    user = User(request.get_json()['firstName'], request.get_json()['lastName'])
    db.session.add(user)
    db.session.commit()
    return jsonify({'user': user.serialize}), 201


@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({'error': 'Not found'}), 404)


@app.errorhandler(400)
def not_found(error):
    return make_response(jsonify({'error': 'Bad Request'}), 400)


@app.errorhandler(405)
def not_found(error):
    return make_response(jsonify({'error': 'Method Not Allowed'}), 405)
normal
{ "blob_id": "f4519fa82ffc6bf945c7bb36d3761a708a06f641", "index": 5933, "step-1": "<mask token>\n\n\[email protected]('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n<mask token>\n\n\[email protected](405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n", "step-2": "<mask token>\napp.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])\n<mask token>\n\n\[email protected]('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected](400)\ndef not_found(error):\n return make_response(jsonify({'error': 'Bad Request'}), 400)\n\n\[email protected](405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n", "step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])\ndb = SQLAlchemy(app)\n\n\[email protected]('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected](400)\ndef not_found(error):\n return make_response(jsonify({'error': 'Bad Request'}), 400)\n\n\[email protected](405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n", "step-4": "import os\nfrom flask import Flask, jsonify, request, abort, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom .models import User\nfrom .config import app_config\napp = Flask(__name__)\napp.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])\ndb = SQLAlchemy(app)\n\n\[email protected]('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\[email protected]('/api/v1/users', 
methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\[email protected](400)\ndef not_found(error):\n return make_response(jsonify({'error': 'Bad Request'}), 400)\n\n\[email protected](405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n", "step-5": null, "step-ids": [ 4, 6, 7, 8 ] }
[ 4, 6, 7, 8 ]
import os

from django.conf import settings

from chamber.importers import BulkCSVImporter, CSVImporter

from .models import CSVRecord


class BulkCSVRecordImporter(BulkCSVImporter):
    model_class = CSVRecord
    fields = ('id', 'name', 'number')
    csv_path = os.path.join(settings.PROJECT_DIR, 'data', 'all_fields_filled.csv')

    def clean_number(self, value):
        # Just to test clean methods are called
        return 888


class CSVRecordImporter(CSVImporter):
    model_class = CSVRecord
    fields = ('id', 'name', 'number')
    csv_path = os.path.join(settings.PROJECT_DIR, 'data', 'all_fields_filled.csv')

    def clean_number(self, value):
        # Just to test clean methods are called
        return 888
normal
{ "blob_id": "559bd0c1821f405d21cdacba55f129ee5220bb5d", "index": 3751, "step-1": "<mask token>\n\n\nclass CSVRecordImporter(CSVImporter):\n model_class = CSVRecord\n fields = 'id', 'name', 'number'\n csv_path = os.path.join(settings.PROJECT_DIR, 'data',\n 'all_fields_filled.csv')\n\n def clean_number(self, value):\n return 888\n", "step-2": "<mask token>\n\n\nclass BulkCSVRecordImporter(BulkCSVImporter):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CSVRecordImporter(CSVImporter):\n model_class = CSVRecord\n fields = 'id', 'name', 'number'\n csv_path = os.path.join(settings.PROJECT_DIR, 'data',\n 'all_fields_filled.csv')\n\n def clean_number(self, value):\n return 888\n", "step-3": "<mask token>\n\n\nclass BulkCSVRecordImporter(BulkCSVImporter):\n model_class = CSVRecord\n fields = 'id', 'name', 'number'\n csv_path = os.path.join(settings.PROJECT_DIR, 'data',\n 'all_fields_filled.csv')\n\n def clean_number(self, value):\n return 888\n\n\nclass CSVRecordImporter(CSVImporter):\n model_class = CSVRecord\n fields = 'id', 'name', 'number'\n csv_path = os.path.join(settings.PROJECT_DIR, 'data',\n 'all_fields_filled.csv')\n\n def clean_number(self, value):\n return 888\n", "step-4": "import os\nfrom django.conf import settings\nfrom chamber.importers import BulkCSVImporter, CSVImporter\nfrom .models import CSVRecord\n\n\nclass BulkCSVRecordImporter(BulkCSVImporter):\n model_class = CSVRecord\n fields = 'id', 'name', 'number'\n csv_path = os.path.join(settings.PROJECT_DIR, 'data',\n 'all_fields_filled.csv')\n\n def clean_number(self, value):\n return 888\n\n\nclass CSVRecordImporter(CSVImporter):\n model_class = CSVRecord\n fields = 'id', 'name', 'number'\n csv_path = os.path.join(settings.PROJECT_DIR, 'data',\n 'all_fields_filled.csv')\n\n def clean_number(self, value):\n return 888\n", "step-5": "import os\n\nfrom django.conf import settings\n\nfrom chamber.importers import BulkCSVImporter, CSVImporter\n\nfrom .models import CSVRecord\n\n\nclass BulkCSVRecordImporter(BulkCSVImporter):\n model_class = CSVRecord\n fields = ('id', 'name', 'number')\n csv_path = os.path.join(settings.PROJECT_DIR, 'data', 'all_fields_filled.csv')\n\n def clean_number(self, value):\n # Just to test clean methods are called\n return 888\n\n\nclass CSVRecordImporter(CSVImporter):\n model_class = CSVRecord\n fields = ('id', 'name', 'number')\n csv_path = os.path.join(settings.PROJECT_DIR, 'data', 'all_fields_filled.csv')\n\n def clean_number(self, value):\n # Just to test clean methods are called\n return 888\n", "step-ids": [ 3, 4, 6, 7, 8 ] }
[ 3, 4, 6, 7, 8 ]
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: NVLGPSStatus.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='NVLGPSStatus.proto', package='', syntax='proto2', serialized_options=None, serialized_pb=_b('\n\x12NVLGPSStatus.proto\"\x8d\x03\n\x0cNVLGPSStatus\x12\x12\n\ntracker_id\x18\x01 \x02(\x0c\x12\x12\n\ngps_active\x18\x02 \x02(\x08\x12\x10\n\x08\x64\x61te_day\x18\x03 \x01(\x05\x12\x12\n\ndate_month\x18\x04 \x01(\x05\x12\x11\n\tdate_year\x18\x05 \x01(\x05\x12\x12\n\ntime_hours\x18\x06 \x01(\x05\x12\x14\n\x0ctime_minutes\x18\x07 \x01(\x05\x12\x14\n\x0ctime_seconds\x18\x08 \x01(\x05\x12\x19\n\x11time_microseconds\x18\t \x01(\x05\x12\x10\n\x08latitude\x18\n \x01(\x01\x12\x11\n\tlongitude\x18\x0b \x01(\x01\x12\x1f\n\x17speed_over_ground_knots\x18\x0c \x01(\x02\x12\x1b\n\x13track_angle_degrees\x18\r \x01(\x02\x12\x1a\n\x12magnetic_variation\x18\x0e \x01(\x02\x12\x12\n\nfuel_level\x18\x0f \x01(\x05\x12\x15\n\rvoltage_level\x18\x10 \x01(\x02\x12\x17\n\x0fvehicle_running\x18\x11 \x01(\x08') ) _NVLGPSSTATUS = _descriptor.Descriptor( name='NVLGPSStatus', full_name='NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='tracker_id', full_name='NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9, label=2, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='gps_active', full_name='NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='date_day', full_name='NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='date_month', full_name='NVLGPSStatus.date_month', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='date_year', full_name='NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='time_hours', full_name='NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='time_minutes', full_name='NVLGPSStatus.time_minutes', 
index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='time_seconds', full_name='NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='time_microseconds', full_name='NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='latitude', full_name='NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='longitude', full_name='NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='speed_over_ground_knots', full_name='NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='track_angle_degrees', full_name='NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='magnetic_variation', full_name='NVLGPSStatus.magnetic_variation', index=13, number=14, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='fuel_level', full_name='NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='voltage_level', full_name='NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='vehicle_running', full_name='NVLGPSStatus.vehicle_running', index=16, number=17, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], 
extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=23, serialized_end=420, ) DESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS _sym_db.RegisterFileDescriptor(DESCRIPTOR) NVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (_message.Message,), dict( DESCRIPTOR = _NVLGPSSTATUS, __module__ = 'NVLGPSStatus_pb2' # @@protoc_insertion_point(class_scope:NVLGPSStatus) )) _sym_db.RegisterMessage(NVLGPSStatus) # @@protoc_insertion_point(module_scope)
normal
{ "blob_id": "98d2196439a8dc3d511d176e61897aa67663a0b5", "index": 4922, "step-1": "<mask token>\n", "step-2": "<mask token>\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n<mask token>\n_sym_db.RegisterMessage(NVLGPSStatus)\n", "step-3": "<mask token>\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\n<mask token>\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=\n '', syntax='proto2', serialized_options=None, serialized_pb=_b(\n '\\n\\x12NVLGPSStatus.proto\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08date_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08'\n ))\n_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=\n 'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,\n fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=\n 'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,\n label=2, has_default_value=False, default_value=_b(''), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='gps_active', full_name=\n 'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label\n =2, has_default_value=False, default_value=False, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='date_day', full_name=\n 'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0, message_type=None, enum_type=\n None, containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3, number\n =4, type=5, cpp_type=1, label=1, has_default_value=False, default_value\n =0, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=\n 'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=\n 1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_hours', full_name=\n 'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label\n =1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, 
serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_minutes', full_name=\n 'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_seconds', full_name=\n 'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_microseconds', full_name=\n 'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='latitude', full_name=\n 'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=\n 1, has_default_value=False, default_value=float(0), message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='longitude', full_name=\n 'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=\n 'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='track_angle_degrees', full_name=\n 'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='magnetic_variation', full_name=\n 'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='fuel_level', full_name=\n 'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='voltage_level', full_name=\n 'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='vehicle_running', full_name=\n 'NVLGPSStatus.vehicle_running', 
index=16, number=17, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)],\n extensions=[], nested_types=[], enum_types=[], serialized_options=None,\n is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],\n serialized_start=23, serialized_end=420)\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (\n _message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=\n 'NVLGPSStatus_pb2'))\n_sym_db.RegisterMessage(NVLGPSStatus)\n", "step-4": "import sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=\n '', syntax='proto2', serialized_options=None, serialized_pb=_b(\n '\\n\\x12NVLGPSStatus.proto\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08date_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08'\n ))\n_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=\n 'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,\n fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=\n 'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,\n label=2, has_default_value=False, default_value=_b(''), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='gps_active', full_name=\n 'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label\n =2, has_default_value=False, default_value=False, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='date_day', full_name=\n 'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0, message_type=None, enum_type=\n None, containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3, number\n =4, type=5, cpp_type=1, label=1, has_default_value=False, default_value\n =0, message_type=None, 
enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=\n 'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=\n 1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_hours', full_name=\n 'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label\n =1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_minutes', full_name=\n 'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_seconds', full_name=\n 'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_microseconds', full_name=\n 'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='latitude', full_name=\n 'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=\n 1, has_default_value=False, default_value=float(0), message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='longitude', full_name=\n 'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=\n 'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='track_angle_degrees', full_name=\n 'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='magnetic_variation', full_name=\n 'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n 
_descriptor.FieldDescriptor(name='fuel_level', full_name=\n 'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='voltage_level', full_name=\n 'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='vehicle_running', full_name=\n 'NVLGPSStatus.vehicle_running', index=16, number=17, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)],\n extensions=[], nested_types=[], enum_types=[], serialized_options=None,\n is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],\n serialized_start=23, serialized_end=420)\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (\n _message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=\n 'NVLGPSStatus_pb2'))\n_sym_db.RegisterMessage(NVLGPSStatus)\n", "step-5": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: NVLGPSStatus.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='NVLGPSStatus.proto',\n package='',\n syntax='proto2',\n serialized_options=None,\n serialized_pb=_b('\\n\\x12NVLGPSStatus.proto\\\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08\\x64\\x61te_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08')\n)\n\n\n\n\n_NVLGPSSTATUS = _descriptor.Descriptor(\n name='NVLGPSStatus',\n full_name='NVLGPSStatus',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='tracker_id', full_name='NVLGPSStatus.tracker_id', index=0,\n number=1, type=12, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n 
is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='gps_active', full_name='NVLGPSStatus.gps_active', index=1,\n number=2, type=8, cpp_type=7, label=2,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_day', full_name='NVLGPSStatus.date_day', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3,\n number=4, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_year', full_name='NVLGPSStatus.date_year', index=4,\n number=5, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_hours', full_name='NVLGPSStatus.time_hours', index=5,\n number=6, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_minutes', full_name='NVLGPSStatus.time_minutes', index=6,\n number=7, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_seconds', full_name='NVLGPSStatus.time_seconds', index=7,\n number=8, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_microseconds', full_name='NVLGPSStatus.time_microseconds', index=8,\n number=9, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='latitude', full_name='NVLGPSStatus.latitude', index=9,\n number=10, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='longitude', full_name='NVLGPSStatus.longitude', index=10,\n number=11, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='speed_over_ground_knots', full_name='NVLGPSStatus.speed_over_ground_knots', index=11,\n 
number=12, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='track_angle_degrees', full_name='NVLGPSStatus.track_angle_degrees', index=12,\n number=13, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='magnetic_variation', full_name='NVLGPSStatus.magnetic_variation', index=13,\n number=14, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='fuel_level', full_name='NVLGPSStatus.fuel_level', index=14,\n number=15, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='voltage_level', full_name='NVLGPSStatus.voltage_level', index=15,\n number=16, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='vehicle_running', full_name='NVLGPSStatus.vehicle_running', index=16,\n number=17, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=23,\n serialized_end=420,\n)\n\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (_message.Message,), dict(\n DESCRIPTOR = _NVLGPSSTATUS,\n __module__ = 'NVLGPSStatus_pb2'\n # @@protoc_insertion_point(class_scope:NVLGPSStatus)\n ))\n_sym_db.RegisterMessage(NVLGPSStatus)\n\n\n# @@protoc_insertion_point(module_scope)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# from django.urls import path,include
from django.conf.urls import include, url

from . import views

urlpatterns = [
    url('buy', views.BuyPage, name='BuyPage'),
    url('sell', views.SellPage, name='SellPage'),
    url('', views.TradePage, name='TradePage'),
]
normal
{ "blob_id": "5bbaffb35a89558b5cf0b4364f78d68ff2d69a01", "index": 5726, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [url('buy', views.BuyPage, name='BuyPage'), url('sell', views\n .SellPage, name='SellPage'), url('', views.TradePage, name='TradePage')]\n", "step-3": "from django.conf.urls import include, url\nfrom . import views\nurlpatterns = [url('buy', views.BuyPage, name='BuyPage'), url('sell', views\n .SellPage, name='SellPage'), url('', views.TradePage, name='TradePage')]\n", "step-4": "# from django.urls import path,include\nfrom django.conf.urls import include, url\n\nfrom . import views\n\nurlpatterns = [\n url('buy',views.BuyPage,name='BuyPage'),\n url('sell',views.SellPage,name='SellPage'),\n url('',views.TradePage,name='TradePage'),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]