code
stringlengths
13
1.2M
order_type
stringclasses
1 value
original_example
dict
step_ids
listlengths
1
5
"""Django admin registrations for the carousel and budget models."""
from django.contrib import admin

from .models import Budget, CarouselImage

# Expose both models through the default admin site with stock ModelAdmins.
admin.site.register(CarouselImage)
admin.site.register(Budget)
normal
{ "blob_id": "98fb70e1911522365292c86603481656e7b86d73", "index": 8337, "step-1": "<mask token>\n", "step-2": "<mask token>\nadmin.site.register(CarouselImage)\nadmin.site.register(Budget)\n", "step-3": "from django.contrib import admin\nfrom .models import CarouselImage, Budget\nadmin.site.register(CarouselImage)\nadmin.site.register(Budget)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
"""
Copyright 2019 Enzo Busseti, Walaa Moursi, and Stephen Boyd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import time

import numpy as np
import scs
import ecos

from .problem import *
from .refine import *


class SolverError(Exception):
    """Raised when a solver cannot handle or cannot solve the given problem."""
    pass


def scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):
    """Wrap ``scs.solve`` and normalize its output into a ``z`` embedding.

    Parameters
    ----------
    A, b, c : conic-form problem data.
    dim_dict : dict
        Cone dimensions; recognized keys: 'z' (zero cone), 'l', 'q',
        's', 'ep', 'ed'.
    init_z : unused; kept for interface compatibility.
    kwargs : forwarded verbatim to ``scs.solve``.

    Returns
    -------
    (z, info)
        ``z`` is the homogeneous self-dual embedding of the solution
        (tau=1) or of the infeasibility/unboundedness certificate
        (kappa=1); ``info`` is the raw SCS info dict.

    Raises
    ------
    SolverError
        If SCS reports an indeterminate status (statusVal == 0).
    """
    # Translate our dim_dict convention to SCS's cone dict
    # (our 'z' zero cone is SCS's 'f').
    scs_cones = {'l': dim_dict.get('l', 0),
                 'q': dim_dict.get('q', []),
                 's': dim_dict.get('s', []),
                 'ep': dim_dict.get('ep', 0),
                 'ed': dim_dict.get('ed', 0),
                 'f': dim_dict.get('z', 0)}
    sol = scs.solve({'A': A, 'b': b, 'c': c}, cone=scs_cones, **kwargs)
    info = sol['info']

    if info['statusVal'] == 0:
        # The original fall-through here left ``z`` unbound and raised a
        # confusing NameError; fail with an explicit error instead.
        raise SolverError('SCS returned an indeterminate status.')

    if info['statusVal'] > 0:
        # Solved: embed with tau = 1, kappa = 0.
        z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1., kappa=0.)

    if info['statusVal'] < 0:
        # Infeasible/unbounded: replace NaN blocks with zeros, then
        # rescale the certificate so its defining objective equals -1.
        x = np.zeros_like(sol['x']) \
            if np.any(np.isnan(sol['x'])) else sol['x']
        s = np.zeros_like(sol['s']) \
            if np.any(np.isnan(sol['s'])) else sol['s']
        y = np.zeros_like(sol['y']) \
            if np.any(np.isnan(sol['y'])) else sol['y']

        if np.allclose(y, 0.) and c@x < 0:
            # Unboundedness certificate: normalize so c'x == -1.
            obj = c@x
            x /= -obj
            s /= -obj

        if np.allclose(s, 0.) and b@y < 0:
            # Infeasibility certificate: normalize so b'y == -1.
            obj = b@y
            y /= -obj

        z = xsy2z(x, s, y, tau=0., kappa=1.)

    return z, info


def ecos_solve(A, b, c, dim_dict, **kwargs):
    """Wrap ``ecos.solve`` and normalize its output into a ``z`` embedding.

    ECOS defines the exponential cone with y and z switched relative to
    our convention, and has no semidefinite cone, so problems with 'ep'
    or 's' dimensions are rejected.

    Returns ``(z, info)`` like :func:`scs_solve`.

    Raises
    ------
    SolverError
        If the problem uses exponential or semidefinite cones.
    """
    ecos_cones = {'l': dim_dict.get('l', 0),
                  'q': dim_dict.get('q', [])}
    if ('ep' in dim_dict and dim_dict['ep'] > 0
            or 's' in dim_dict and len(dim_dict['s']) > 0):
        raise SolverError(
            'Only zero, linear, and second order cones supported.')
    # ECOS takes the zero-cone rows as a separate equality system (A, b);
    # the remaining rows become the inequality system (G, h).
    zero = dim_dict.get('z', 0)
    ecos_A, ecos_G = A[:zero, :], A[zero:, :]
    ecos_b, ecos_h = b[:zero], b[zero:]
    sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones,
                     A=ecos_A, b=ecos_b, **kwargs)

    solution = True

    x = sol['x']
    # Zero-cone slacks are identically zero; append the cone slacks.
    s = np.concatenate([np.zeros(zero), sol['s']])
    # Dual variable: equality multipliers first, then cone multipliers.
    y = np.concatenate([sol['y'], sol['z']])

    if sol['info']['exitFlag'] == 0:  # solved; print residual sanity checks
        print('prim abs res.', np.linalg.norm(A@x + s - b))
        print('dua abs res.', np.linalg.norm(A.T@y + c))
        print('s^T y', s@y)

    if sol['info']['exitFlag'] in [1, 11]:  # primal infeasible
        solution = False
        obj = b@y
        assert (obj < 0)
        y /= -obj  # normalize certificate so b'y == -1
        print('primal infeas. cert residual norm', np.linalg.norm(A.T@y))
        proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))
        print('primal infeas dist from cone', np.linalg.norm(proj))
        x = np.zeros_like(x)
        s = np.zeros_like(s)

    if sol['info']['exitFlag'] in [2, 12]:  # dual infeasible (unbounded)
        solution = False
        obj = c@x
        assert (obj < 0)
        x /= -obj  # normalize certificate so c'x == -1
        s /= -obj
        print('dual infeas. cert residual norm', np.linalg.norm(A@x + s))
        proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))
        print('dual infeas cert dist from cone', np.linalg.norm(s - proj))
        y = np.zeros_like(y)

    # tau/kappa encode solution (tau=1) vs. certificate (kappa=1).
    z = xsy2z(x, s, y, tau=solution, kappa=not solution)

    return z, sol['info']


def solve(A, b, c, dim_dict,
          solver='scs',
          solver_options=None,
          refine_solver_time_ratio=1.,
          max_iters=10,
          verbose=False,
          max_lsqr_iters=20,
          return_z=False):
    """Solve the conic program with scs or ecos, then refine the solution.

    Parameters
    ----------
    solver : {'scs', 'ecos'}
    solver_options : dict or None
        Keyword arguments forwarded to the chosen solver wrapper
        (defaults to no options).
    return_z : bool
        If True, return the refined embedding ``(z_plus, info)``;
        otherwise return ``(x, s, y, info)``.

    Raises
    ------
    SolverError
        If ``solver`` is not one of the supported names.
    """
    if solver_options is None:
        # Avoid the shared-mutable-default pitfall of ``solver_options={}``.
        solver_options = {}

    solver_start = time.time()
    if solver == 'scs':
        z, info = scs_solve(A, b, c, dim_dict, **solver_options)
    elif solver == 'ecos':
        z, info = ecos_solve(A, b, c, dim_dict, **solver_options)
    else:
        # SolverError subclasses Exception, so existing callers that
        # caught the old generic Exception still work.
        raise SolverError('The only supported solvers are ecos and scs')
    # Kept for a future refine time budget (see commented max_runtime).
    solver_time = time.time() - solver_start

    A = sp.csc_matrix(A)
    new_residual, u, v = residual_and_uv(
        z, (A.indptr, A.indices, A.data), b, c,
        make_prod_cone_cache(dim_dict))
    x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])

    # Relative primal/dual residuals and duality gap before refinement.
    pres = np.linalg.norm(A@x + s - b) / (1 + np.linalg.norm(b))
    dres = np.linalg.norm(A.T@y + c) / (1 + np.linalg.norm(c))
    gap = np.abs(c@x + b@y) / (1 + np.abs(c@x) + np.abs(b@y))
    print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))

    z_plus = refine(A, b, c, dim_dict, z,
                    verbose=verbose,
                    iters=max_iters,
                    lsqr_iters=max_lsqr_iters)  # ,
    # max_runtime=solver_time * refine_solver_time_ratio)

    if return_z:
        return z_plus, info

    new_residual, u, v = residual_and_uv(
        z_plus, (A.indptr, A.indices, A.data), b, c,
        make_prod_cone_cache(dim_dict))
    x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])
    # Same metrics after refinement, to show the improvement.
    pres = np.linalg.norm(A@x + s - b) / (1 + np.linalg.norm(b))
    dres = np.linalg.norm(A.T@y + c) / (1 + np.linalg.norm(c))
    gap = np.abs(c@x + b@y) / (1 + np.abs(c@x) + np.abs(b@y))
    print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))
    return x, s, y, info
normal
{ "blob_id": "00a0668d5fcb8358b4bd7736c48e4867afc0f5b6", "index": 780, "step-1": "<mask token>\n\n\nclass SolverError(Exception):\n pass\n\n\n<mask token>\n\n\ndef ecos_solve(A, b, c, dim_dict, **kwargs):\n \"\"\"Wraps ecos.solve for convenience.\"\"\"\n ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q': \n dim_dict['q'] if 'q' in dim_dict else []}\n if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(\n dim_dict['s']) > 0:\n raise SolverError(\n 'Only zero, linear, and second order cones supported.')\n zero = 0 if 'z' not in dim_dict else dim_dict['z']\n ecos_A, ecos_G = A[:zero, :], A[zero:, :]\n ecos_b, ecos_h = b[:zero], b[zero:]\n sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, A=ecos_A, b=\n ecos_b, **kwargs)\n solution = True\n x = sol['x']\n s = np.concatenate([np.zeros(zero), sol['s']])\n y = np.concatenate([sol['y'], sol['z']])\n if sol['info']['exitFlag'] == 0:\n print('prim abs res.', np.linalg.norm(A @ x + s - b))\n print('dua abs res.', np.linalg.norm(A.T @ y + c))\n print('s^T y', s @ y)\n if sol['info']['exitFlag'] in [1, 11]:\n solution = False\n obj = b @ y\n assert obj < 0\n y /= -obj\n print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))\n proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))\n print('primal infeas dist from cone', np.linalg.norm(proj))\n x = np.zeros_like(x)\n s = np.zeros_like(s)\n if sol['info']['exitFlag'] in [2, 12]:\n solution = False\n obj = c @ x\n assert obj < 0\n x /= -obj\n s /= -obj\n print('dual infeas. 
cert residual norm', np.linalg.norm(A @ x + s))\n proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))\n print('dual infeas cert dist from cone', np.linalg.norm(s - proj))\n y = np.zeros_like(y)\n z = xsy2z(x, s, y, tau=solution, kappa=not solution)\n return z, sol['info']\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass SolverError(Exception):\n pass\n\n\ndef scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):\n \"\"\"Wraps scs.solve for convenience.\"\"\"\n scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q': \n dim_dict['q'] if 'q' in dim_dict else [], 's': dim_dict['s'] if 's' in\n dim_dict else [], 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,\n 'ed': dim_dict['ed'] if 'ed' in dim_dict else 0, 'f': dim_dict['z'] if\n 'z' in dim_dict else 0}\n sol = scs.solve({'A': A, 'b': b, 'c': c}, cone=scs_cones, **kwargs)\n info = sol['info']\n if info['statusVal'] > 0:\n z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1.0, kappa=0.0)\n if info['statusVal'] < 0:\n x = np.zeros_like(sol['x']) if np.any(np.isnan(sol['x'])) else sol['x']\n s = np.zeros_like(sol['s']) if np.any(np.isnan(sol['s'])) else sol['s']\n y = np.zeros_like(sol['y']) if np.any(np.isnan(sol['y'])) else sol['y']\n if np.allclose(y, 0.0) and c @ x < 0:\n obj = c @ x\n x /= -obj\n s /= -obj\n if np.allclose(s, 0.0) and b @ y < 0:\n obj = b @ y\n y /= -obj\n z = xsy2z(x, s, y, tau=0.0, kappa=1.0)\n return z, info\n\n\ndef ecos_solve(A, b, c, dim_dict, **kwargs):\n \"\"\"Wraps ecos.solve for convenience.\"\"\"\n ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q': \n dim_dict['q'] if 'q' in dim_dict else []}\n if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(\n dim_dict['s']) > 0:\n raise SolverError(\n 'Only zero, linear, and second order cones supported.')\n zero = 0 if 'z' not in dim_dict else dim_dict['z']\n ecos_A, ecos_G = A[:zero, :], A[zero:, :]\n ecos_b, ecos_h = b[:zero], b[zero:]\n sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, 
A=ecos_A, b=\n ecos_b, **kwargs)\n solution = True\n x = sol['x']\n s = np.concatenate([np.zeros(zero), sol['s']])\n y = np.concatenate([sol['y'], sol['z']])\n if sol['info']['exitFlag'] == 0:\n print('prim abs res.', np.linalg.norm(A @ x + s - b))\n print('dua abs res.', np.linalg.norm(A.T @ y + c))\n print('s^T y', s @ y)\n if sol['info']['exitFlag'] in [1, 11]:\n solution = False\n obj = b @ y\n assert obj < 0\n y /= -obj\n print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))\n proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))\n print('primal infeas dist from cone', np.linalg.norm(proj))\n x = np.zeros_like(x)\n s = np.zeros_like(s)\n if sol['info']['exitFlag'] in [2, 12]:\n solution = False\n obj = c @ x\n assert obj < 0\n x /= -obj\n s /= -obj\n print('dual infeas. cert residual norm', np.linalg.norm(A @ x + s))\n proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))\n print('dual infeas cert dist from cone', np.linalg.norm(s - proj))\n y = np.zeros_like(y)\n z = xsy2z(x, s, y, tau=solution, kappa=not solution)\n return z, sol['info']\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass SolverError(Exception):\n pass\n\n\ndef scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):\n \"\"\"Wraps scs.solve for convenience.\"\"\"\n scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q': \n dim_dict['q'] if 'q' in dim_dict else [], 's': dim_dict['s'] if 's' in\n dim_dict else [], 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,\n 'ed': dim_dict['ed'] if 'ed' in dim_dict else 0, 'f': dim_dict['z'] if\n 'z' in dim_dict else 0}\n sol = scs.solve({'A': A, 'b': b, 'c': c}, cone=scs_cones, **kwargs)\n info = sol['info']\n if info['statusVal'] > 0:\n z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1.0, kappa=0.0)\n if info['statusVal'] < 0:\n x = np.zeros_like(sol['x']) if np.any(np.isnan(sol['x'])) else sol['x']\n s = np.zeros_like(sol['s']) if np.any(np.isnan(sol['s'])) else sol['s']\n y = np.zeros_like(sol['y']) if 
np.any(np.isnan(sol['y'])) else sol['y']\n if np.allclose(y, 0.0) and c @ x < 0:\n obj = c @ x\n x /= -obj\n s /= -obj\n if np.allclose(s, 0.0) and b @ y < 0:\n obj = b @ y\n y /= -obj\n z = xsy2z(x, s, y, tau=0.0, kappa=1.0)\n return z, info\n\n\ndef ecos_solve(A, b, c, dim_dict, **kwargs):\n \"\"\"Wraps ecos.solve for convenience.\"\"\"\n ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q': \n dim_dict['q'] if 'q' in dim_dict else []}\n if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(\n dim_dict['s']) > 0:\n raise SolverError(\n 'Only zero, linear, and second order cones supported.')\n zero = 0 if 'z' not in dim_dict else dim_dict['z']\n ecos_A, ecos_G = A[:zero, :], A[zero:, :]\n ecos_b, ecos_h = b[:zero], b[zero:]\n sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, A=ecos_A, b=\n ecos_b, **kwargs)\n solution = True\n x = sol['x']\n s = np.concatenate([np.zeros(zero), sol['s']])\n y = np.concatenate([sol['y'], sol['z']])\n if sol['info']['exitFlag'] == 0:\n print('prim abs res.', np.linalg.norm(A @ x + s - b))\n print('dua abs res.', np.linalg.norm(A.T @ y + c))\n print('s^T y', s @ y)\n if sol['info']['exitFlag'] in [1, 11]:\n solution = False\n obj = b @ y\n assert obj < 0\n y /= -obj\n print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))\n proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))\n print('primal infeas dist from cone', np.linalg.norm(proj))\n x = np.zeros_like(x)\n s = np.zeros_like(s)\n if sol['info']['exitFlag'] in [2, 12]:\n solution = False\n obj = c @ x\n assert obj < 0\n x /= -obj\n s /= -obj\n print('dual infeas. 
cert residual norm', np.linalg.norm(A @ x + s))\n proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))\n print('dual infeas cert dist from cone', np.linalg.norm(s - proj))\n y = np.zeros_like(y)\n z = xsy2z(x, s, y, tau=solution, kappa=not solution)\n return z, sol['info']\n\n\ndef solve(A, b, c, dim_dict, solver='scs', solver_options={},\n refine_solver_time_ratio=1.0, max_iters=10, verbose=False,\n max_lsqr_iters=20, return_z=False):\n solver_start = time.time()\n if solver == 'scs':\n z, info = scs_solve(A, b, c, dim_dict, **solver_options)\n elif solver == 'ecos':\n z, info = ecos_solve(A, b, c, dim_dict, **solver_options)\n else:\n raise Exception('The only supported solvers are ecos and scs')\n solver_time = time.time() - solver_start\n A = sp.csc_matrix(A)\n new_residual, u, v = residual_and_uv(z, (A.indptr, A.indices, A.data),\n b, c, make_prod_cone_cache(dim_dict))\n x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])\n pres = np.linalg.norm(A @ x + s - b) / (1 + np.linalg.norm(b))\n dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))\n gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))\n print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))\n z_plus = refine(A, b, c, dim_dict, z, verbose=verbose, iters=max_iters,\n lsqr_iters=max_lsqr_iters)\n if return_z:\n return z_plus, info\n else:\n new_residual, u, v = residual_and_uv(z_plus, (A.indptr, A.indices,\n A.data), b, c, make_prod_cone_cache(dim_dict))\n x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])\n pres = np.linalg.norm(A @ x + s - b) / (1 + np.linalg.norm(b))\n dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))\n gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))\n print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))\n return x, s, y, info\n", "step-4": "<mask token>\nimport numpy as np\nimport scs\nimport ecos\nimport time\nfrom .problem import *\nfrom .refine import *\n\n\nclass SolverError(Exception):\n pass\n\n\ndef 
scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):\n \"\"\"Wraps scs.solve for convenience.\"\"\"\n scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q': \n dim_dict['q'] if 'q' in dim_dict else [], 's': dim_dict['s'] if 's' in\n dim_dict else [], 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,\n 'ed': dim_dict['ed'] if 'ed' in dim_dict else 0, 'f': dim_dict['z'] if\n 'z' in dim_dict else 0}\n sol = scs.solve({'A': A, 'b': b, 'c': c}, cone=scs_cones, **kwargs)\n info = sol['info']\n if info['statusVal'] > 0:\n z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1.0, kappa=0.0)\n if info['statusVal'] < 0:\n x = np.zeros_like(sol['x']) if np.any(np.isnan(sol['x'])) else sol['x']\n s = np.zeros_like(sol['s']) if np.any(np.isnan(sol['s'])) else sol['s']\n y = np.zeros_like(sol['y']) if np.any(np.isnan(sol['y'])) else sol['y']\n if np.allclose(y, 0.0) and c @ x < 0:\n obj = c @ x\n x /= -obj\n s /= -obj\n if np.allclose(s, 0.0) and b @ y < 0:\n obj = b @ y\n y /= -obj\n z = xsy2z(x, s, y, tau=0.0, kappa=1.0)\n return z, info\n\n\ndef ecos_solve(A, b, c, dim_dict, **kwargs):\n \"\"\"Wraps ecos.solve for convenience.\"\"\"\n ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0, 'q': \n dim_dict['q'] if 'q' in dim_dict else []}\n if 'ep' in dim_dict and dim_dict['ep'] > 0 or 's' in dim_dict and len(\n dim_dict['s']) > 0:\n raise SolverError(\n 'Only zero, linear, and second order cones supported.')\n zero = 0 if 'z' not in dim_dict else dim_dict['z']\n ecos_A, ecos_G = A[:zero, :], A[zero:, :]\n ecos_b, ecos_h = b[:zero], b[zero:]\n sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones, A=ecos_A, b=\n ecos_b, **kwargs)\n solution = True\n x = sol['x']\n s = np.concatenate([np.zeros(zero), sol['s']])\n y = np.concatenate([sol['y'], sol['z']])\n if sol['info']['exitFlag'] == 0:\n print('prim abs res.', np.linalg.norm(A @ x + s - b))\n print('dua abs res.', np.linalg.norm(A.T @ y + c))\n print('s^T y', s @ y)\n if sol['info']['exitFlag'] in [1, 11]:\n solution 
= False\n obj = b @ y\n assert obj < 0\n y /= -obj\n print('primal infeas. cert residual norm', np.linalg.norm(A.T @ y))\n proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))\n print('primal infeas dist from cone', np.linalg.norm(proj))\n x = np.zeros_like(x)\n s = np.zeros_like(s)\n if sol['info']['exitFlag'] in [2, 12]:\n solution = False\n obj = c @ x\n assert obj < 0\n x /= -obj\n s /= -obj\n print('dual infeas. cert residual norm', np.linalg.norm(A @ x + s))\n proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))\n print('dual infeas cert dist from cone', np.linalg.norm(s - proj))\n y = np.zeros_like(y)\n z = xsy2z(x, s, y, tau=solution, kappa=not solution)\n return z, sol['info']\n\n\ndef solve(A, b, c, dim_dict, solver='scs', solver_options={},\n refine_solver_time_ratio=1.0, max_iters=10, verbose=False,\n max_lsqr_iters=20, return_z=False):\n solver_start = time.time()\n if solver == 'scs':\n z, info = scs_solve(A, b, c, dim_dict, **solver_options)\n elif solver == 'ecos':\n z, info = ecos_solve(A, b, c, dim_dict, **solver_options)\n else:\n raise Exception('The only supported solvers are ecos and scs')\n solver_time = time.time() - solver_start\n A = sp.csc_matrix(A)\n new_residual, u, v = residual_and_uv(z, (A.indptr, A.indices, A.data),\n b, c, make_prod_cone_cache(dim_dict))\n x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])\n pres = np.linalg.norm(A @ x + s - b) / (1 + np.linalg.norm(b))\n dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))\n gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))\n print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))\n z_plus = refine(A, b, c, dim_dict, z, verbose=verbose, iters=max_iters,\n lsqr_iters=max_lsqr_iters)\n if return_z:\n return z_plus, info\n else:\n new_residual, u, v = residual_and_uv(z_plus, (A.indptr, A.indices,\n A.data), b, c, make_prod_cone_cache(dim_dict))\n x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])\n pres = np.linalg.norm(A @ x + s - b) / (1 + 
np.linalg.norm(b))\n dres = np.linalg.norm(A.T @ y + c) / (1 + np.linalg.norm(c))\n gap = np.abs(c @ x + b @ y) / (1 + np.abs(c @ x) + np.abs(b @ y))\n print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))\n return x, s, y, info\n", "step-5": "\"\"\"\nCopyright 2019 Enzo Busseti, Walaa Moursi, and Stephen Boyd\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n#__all__ = ['solve']\n\nimport numpy as np\nimport scs\nimport ecos\nimport time\n\nfrom .problem import *\nfrom .refine import *\n\n\nclass SolverError(Exception):\n pass\n\n\ndef scs_solve(A, b, c, dim_dict, init_z=None, **kwargs):\n \"\"\"Wraps scs.solve for convenience.\"\"\"\n scs_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0,\n 'q': dim_dict['q'] if 'q' in dim_dict else [],\n 's': dim_dict['s'] if 's' in dim_dict else [],\n 'ep': dim_dict['ep'] if 'ep' in dim_dict else 0,\n 'ed': dim_dict['ed'] if 'ed' in dim_dict else 0,\n 'f': dim_dict['z'] if 'z' in dim_dict else 0}\n #print('scs_cones', scs_cones)\n sol = scs.solve({'A': A, 'b': b,\n 'c': c},\n cone=scs_cones,\n **kwargs)\n info = sol['info']\n\n if info['statusVal'] > 0:\n z = xsy2z(sol['x'], sol['s'], sol['y'], tau=1., kappa=0.)\n\n if info['statusVal'] < 0:\n x = np.zeros_like(sol['x']) \\\n if np.any(np.isnan(sol['x'])) else sol['x']\n\n s = np.zeros_like(sol['s']) \\\n if np.any(np.isnan(sol['s'])) else sol['s']\n\n y = np.zeros_like(sol['y']) \\\n if np.any(np.isnan(sol['y'])) else sol['y']\n\n if np.allclose(y, 0.) 
and c@x < 0:\n obj = c@x\n # assert obj < 0\n x /= -obj\n s /= -obj\n # print('primal res:', np.linalg.norm(A@x + s))\n\n if np.allclose(s, 0.) and b@y < 0:\n obj = b@y\n # assert obj < 0\n y /= -obj\n # print('dual res:', np.linalg.norm(A.T@y))\n\n # print('SCS NONSOLVED')\n # print('x', x)\n # print('s', s)\n # print('y', y)\n\n z = xsy2z(x, s, y, tau=0., kappa=1.)\n\n return z, info\n\n\ndef ecos_solve(A, b, c, dim_dict, **kwargs):\n \"\"\"Wraps ecos.solve for convenience.\"\"\"\n\n ###\n # ECOS uses a different definition of the exp cone,\n # with y and z switched. In the future I might wrap it\n # (i.e., switch rows of A and elements of b, and switch\n # elements of the solutions s and y) but for now\n # I'm not supporting exp cones in ecos.\n ###\n\n ecos_cones = {'l': dim_dict['l'] if 'l' in dim_dict else 0,\n 'q': dim_dict['q'] if 'q' in dim_dict else []} # ,\n # 'e': dim_dict['ep'] if 'ep' in dim_dict else 0}\n # print(ecos_cones)\n if ('ep' in dim_dict and dim_dict['ep'] > 0\n or 's' in dim_dict and len(dim_dict['s']) > 0):\n raise SolverError(\n 'Only zero, linear, and second order cones supported.')\n zero = 0 if 'z' not in dim_dict else dim_dict['z']\n ecos_A, ecos_G = A[:zero, :], A[zero:, :]\n ecos_b, ecos_h = b[:zero], b[zero:]\n sol = ecos.solve(c=c, G=ecos_G, h=ecos_h, dims=ecos_cones,\n A=ecos_A, b=ecos_b, **kwargs)\n\n solution = True\n\n x = sol['x']\n s = np.concatenate([np.zeros(zero), sol['s']])\n # not sure we can trust this\n # s = b - A@x\n y = np.concatenate([sol['y'], sol['z']])\n\n if sol['info']['exitFlag'] == 0: # check that things make sense\n print('prim abs res.', np.linalg.norm(A@x + s - b))\n print('dua abs res.', np.linalg.norm(A.T@y + c))\n print('s^T y', s@y)\n\n if sol['info']['exitFlag'] in [1, 11]: # infeas\n solution = False\n obj = b@y\n assert (obj < 0)\n y /= -obj\n\n print('primal infeas. 
cert residual norm', np.linalg.norm(A.T@y))\n #cones = dim2cones(dim_dict)\n proj = prod_cone.Pi(-y, *make_prod_cone_cache(dim_dict))\n print('primal infeas dist from cone', np.linalg.norm(proj))\n # if not (np.linalg.norm(proj) == 0.) and sol['info']['exitFlag'] == 1.:\n # raise SolverError\n\n x = np.zeros_like(x)\n s = np.zeros_like(s)\n\n if sol['info']['exitFlag'] in [2, 12]: # unbound\n solution = False\n obj = c@x\n assert (obj < 0)\n x /= -obj\n s /= -obj\n\n print('dual infeas. cert residual norm', np.linalg.norm(A@x + s))\n proj = prod_cone.Pi(s, *make_prod_cone_cache(dim_dict))\n print('dual infeas cert dist from cone', np.linalg.norm(s - proj))\n # if not (np.linalg.norm(s - proj) == 0.) and sol['info']['exitFlag'] == 2.:\n # raise SolverError\n y = np.zeros_like(y)\n\n # print('ECOS SOLUTION')\n # print('solution', solution)\n # print('x', x)\n # print('s', s)\n # print('y', y)\n\n z = xsy2z(x, s, y, tau=solution, kappa=not solution)\n\n return z, sol['info']\n\n\ndef solve(A, b, c, dim_dict,\n solver='scs',\n solver_options={},\n refine_solver_time_ratio=1.,\n max_iters=10,\n verbose=False,\n max_lsqr_iters=20,\n return_z=False):\n\n solver_start = time.time()\n if solver == 'scs':\n z, info = scs_solve(A, b, c, dim_dict, **solver_options)\n elif solver == 'ecos':\n z, info = ecos_solve(A, b, c, dim_dict, **solver_options)\n else:\n raise Exception('The only supported solvers are ecos and scs')\n\n solver_time = time.time() - solver_start\n A = sp.csc_matrix(A)\n #A_tr = sp.csc_matrix(A.T)\n new_residual, u, v = residual_and_uv(\n z, (A.indptr, A.indices, A.data), b, c, make_prod_cone_cache(dim_dict))\n x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])\n\n pres = np.linalg.norm(A@x + s - b) / (1 + np.linalg.norm(b))\n dres = np.linalg.norm(A.T@y + c) / (1 + np.linalg.norm(c))\n gap = np.abs(c@x + b@y) / (1 + np.abs(c@x) + np.abs(b@y))\n\n print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))\n\n z_plus = refine(A, b, c, dim_dict, z,\n 
verbose=verbose,\n iters=max_iters,\n lsqr_iters=max_lsqr_iters) # ,\n # max_runtime=solver_time * refine_solver_time_ratio)\n\n if return_z:\n return z_plus, info\n else:\n new_residual, u, v =\\\n residual_and_uv(z_plus, (A.indptr, A.indices, A.data), b, c,\n make_prod_cone_cache(dim_dict))\n x, s, y, tau, kappa = uv2xsytaukappa(u, v, A.shape[1])\n pres = np.linalg.norm(A@x + s - b) / (1 + np.linalg.norm(b))\n dres = np.linalg.norm(A.T@y + c) / (1 + np.linalg.norm(c))\n gap = np.abs(c@x + b@y) / (1 + np.abs(c@x) + np.abs(b@y))\n print('pres %.2e, dres %.2e, gap %.2e' % (pres, dres, gap))\n return x, s, y, info\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
"""Visual comparison of 2-D PCA and LDA projections of the iris data."""
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Load the iris data set: 150 samples, 4 features, 3 classes.
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names

# Split is computed but not used below; kept as in the original workflow.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Unsupervised projection onto the two leading principal components.
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)

# Supervised projection maximizing between-class separation.
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)

colors = ['navy', 'turquoise', 'darkorange']
lw = 2


def _scatter_classes(points, **style):
    """Scatter the rows of *points* (n_samples, 2), one color per class."""
    for color, class_value, target_name in zip(colors, [0, 1, 2], target_names):
        mask = y == class_value
        plt.scatter(points[mask, 0], points[mask, 1],
                    color=color, alpha=.8, label=target_name, **style)
    plt.legend(loc='best', shadow=False, scatterpoints=1)


plt.figure()
_scatter_classes(X_r, lw=lw)
plt.title('pca')

plt.figure()
_scatter_classes(X_r2)
plt.title('lda')

plt.show()
normal
{ "blob_id": "d0448ca8e3fd2f3bb8a3a7ec052e29ab0be6351a", "index": 471, "step-1": "<mask token>\n", "step-2": "<mask token>\nplt.figure()\n<mask token>\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=0.8, lw=\n lw, label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('pca')\nplt.figure()\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=0.8, color=color,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('lda')\nplt.show()\n", "step-3": "<mask token>\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\ntarget_names = iris.target_names\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\npca = PCA(n_components=2)\nX_r = pca.fit(X).transform(X)\nlda = LinearDiscriminantAnalysis(n_components=2)\nX_r2 = lda.fit(X, y).transform(X)\nplt.figure()\ncolors = ['navy', 'turquoise', 'darkorange']\nlw = 2\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=0.8, lw=\n lw, label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('pca')\nplt.figure()\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=0.8, color=color,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('lda')\nplt.show()\n", "step-4": "import matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nimport pandas as pd\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\ntarget_names = iris.target_names\nX_train, 
X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\npca = PCA(n_components=2)\nX_r = pca.fit(X).transform(X)\nlda = LinearDiscriminantAnalysis(n_components=2)\nX_r2 = lda.fit(X, y).transform(X)\nplt.figure()\ncolors = ['navy', 'turquoise', 'darkorange']\nlw = 2\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=0.8, lw=\n lw, label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('pca')\nplt.figure()\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=0.8, color=color,\n label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('lda')\nplt.show()\n", "step-5": "import matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\n\n# a = pd.read_csv('sample20170117_labeled_0207.csv')\n# X = a.values[0: 100, 0: 110]\n# y = a.values[0: 100, 110]\n# y = np.array([1 if i == 1. 
else -1 for i in y])\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\ntarget_names = iris.target_names\n\n# X, y = make_classification(n_samples=1000, n_features=100, n_classes=2)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\npca = PCA(n_components=2)\nX_r = pca.fit(X).transform(X)\n\nlda = LinearDiscriminantAnalysis(n_components=2)\nX_r2 = lda.fit(X, y).transform(X)\n\nplt.figure()\ncolors = ['navy', 'turquoise', 'darkorange']\nlw = 2\n\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('pca')\n\nplt.figure()\nfor color, i, target_name in zip(colors, [0, 1, 2], target_names):\n plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color, label=target_name)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('lda')\n\nplt.show()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_configuration(host): sshd = host.file('/etc/ssh/sshd_config') assert sshd.contains(r'^PermitRootLogin no$') assert sshd.contains(r'^X11Forwarding no$') assert sshd.contains(r'^UsePAM yes$') assert sshd.contains(r'\sPermitTTY no$') ssh = host.file('/etc/ssh/ssh_config') assert ssh.contains(r'^User test$') assert ssh.contains(r'^Host \*$') assert ssh.contains(r'\sPort 23$') def test_service(host): ssh = host.service('ssh') assert ssh.is_running assert ssh.is_enabled assert host.socket('tcp://0.0.0.0:22').is_listening
normal
{ "blob_id": "2345d1f72fb695ccec5af0ed157c0606f197009c", "index": 3398, "step-1": "<mask token>\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n", "step-3": "<mask token>\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[\n 'MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n", "step-4": "import os\nimport testinfra.utils.ansible_runner\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[\n 
'MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n", "step-5": "import os\n\nimport testinfra.utils.ansible_runner\n\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains(r'^PermitRootLogin no$')\n assert sshd.contains(r'^X11Forwarding no$')\n assert sshd.contains(r'^UsePAM yes$')\n assert sshd.contains(r'\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains(r'^User test$')\n assert ssh.contains(r'^Host \\*$')\n assert ssh.contains(r'\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from argparse import ArgumentParser, Namespace def parse_arguments() ->Namespace: """ Parse arguments :return: Arguments """ parser = ArgumentParser(description= 'DLP project: Stock Prediction using Transformer') parser.add_argument('-e', '--epochs', default=10, type=int, help= 'Number of epochs') parser.add_argument('-w', '--warmup', default=2, type=int, help= 'Number of epochs for warmup') parser.add_argument('-l', '--learning_rate', default=0.001, type=float, help='Learning rate') parser.add_argument('-b', '--batch_size', default=64, type=int, help= 'Batch size') parser.add_argument('-s', '--seq_len', default=128, type=int, help= 'Sequence length (consecutive days)') parser.add_argument('-ne', '--num_encoders', default=3, type=int, help= 'Number of transformer encoder in the network') parser.add_argument('-a', '--attn_dim', default=96, type=int, help= 'Dimension of single attention output') parser.add_argument('-nh', '--num_heads', default=12, type=int, help= 'Number of heads for multi-attention') parser.add_argument('-d', '--dropout_rate', default=0.3, type=float, help='Dropout rate') parser.add_argument('-hs', '--hidden_size', default=256, type=int, help ='Hidden size between the linear layers in the encoder') parser.add_argument('-loss', '--loss_function', default='l2', type=str, choices=['l1', 'l2'], help='Loss function') parser.add_argument('-i', '--inference_only', action='store_true', help ='Inference only or not') parser.add_argument('-r', '--root_dir', default='archive', type=str, help='Directory containing the downloaded data') parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[ 0, 1, 2], help='Verbosity level') return parser.parse_args()
normal
{ "blob_id": "81573b4a57f540733ff2faaf82bab78381b9dd46", "index": 1194, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef parse_arguments() ->Namespace:\n \"\"\"\n Parse arguments\n :return: Arguments\n \"\"\"\n parser = ArgumentParser(description=\n 'DLP project: Stock Prediction using Transformer')\n parser.add_argument('-e', '--epochs', default=10, type=int, help=\n 'Number of epochs')\n parser.add_argument('-w', '--warmup', default=2, type=int, help=\n 'Number of epochs for warmup')\n parser.add_argument('-l', '--learning_rate', default=0.001, type=float,\n help='Learning rate')\n parser.add_argument('-b', '--batch_size', default=64, type=int, help=\n 'Batch size')\n parser.add_argument('-s', '--seq_len', default=128, type=int, help=\n 'Sequence length (consecutive days)')\n parser.add_argument('-ne', '--num_encoders', default=3, type=int, help=\n 'Number of transformer encoder in the network')\n parser.add_argument('-a', '--attn_dim', default=96, type=int, help=\n 'Dimension of single attention output')\n parser.add_argument('-nh', '--num_heads', default=12, type=int, help=\n 'Number of heads for multi-attention')\n parser.add_argument('-d', '--dropout_rate', default=0.3, type=float,\n help='Dropout rate')\n parser.add_argument('-hs', '--hidden_size', default=256, type=int, help\n ='Hidden size between the linear layers in the encoder')\n parser.add_argument('-loss', '--loss_function', default='l2', type=str,\n choices=['l1', 'l2'], help='Loss function')\n parser.add_argument('-i', '--inference_only', action='store_true', help\n ='Inference only or not')\n parser.add_argument('-r', '--root_dir', default='archive', type=str,\n help='Directory containing the downloaded data')\n parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[\n 0, 1, 2], help='Verbosity level')\n return parser.parse_args()\n", "step-3": "from argparse import ArgumentParser, Namespace\n\n\ndef parse_arguments() ->Namespace:\n \"\"\"\n Parse arguments\n :return: 
Arguments\n \"\"\"\n parser = ArgumentParser(description=\n 'DLP project: Stock Prediction using Transformer')\n parser.add_argument('-e', '--epochs', default=10, type=int, help=\n 'Number of epochs')\n parser.add_argument('-w', '--warmup', default=2, type=int, help=\n 'Number of epochs for warmup')\n parser.add_argument('-l', '--learning_rate', default=0.001, type=float,\n help='Learning rate')\n parser.add_argument('-b', '--batch_size', default=64, type=int, help=\n 'Batch size')\n parser.add_argument('-s', '--seq_len', default=128, type=int, help=\n 'Sequence length (consecutive days)')\n parser.add_argument('-ne', '--num_encoders', default=3, type=int, help=\n 'Number of transformer encoder in the network')\n parser.add_argument('-a', '--attn_dim', default=96, type=int, help=\n 'Dimension of single attention output')\n parser.add_argument('-nh', '--num_heads', default=12, type=int, help=\n 'Number of heads for multi-attention')\n parser.add_argument('-d', '--dropout_rate', default=0.3, type=float,\n help='Dropout rate')\n parser.add_argument('-hs', '--hidden_size', default=256, type=int, help\n ='Hidden size between the linear layers in the encoder')\n parser.add_argument('-loss', '--loss_function', default='l2', type=str,\n choices=['l1', 'l2'], help='Loss function')\n parser.add_argument('-i', '--inference_only', action='store_true', help\n ='Inference only or not')\n parser.add_argument('-r', '--root_dir', default='archive', type=str,\n help='Directory containing the downloaded data')\n parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[\n 0, 1, 2], help='Verbosity level')\n return parser.parse_args()\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from kivy.app import App from kivy.lang import Builder from kivy.uix.screenmanager import ScreenManager, Screen import subprocess import socket from kivy.uix.button import Button from kivy.uix.button import Label from kivy.uix.boxlayout import BoxLayout Builder.load_string(""" <MenuScreen>: BoxLayout: orientation: "vertical" <SettingsScreen>: BoxLayout: orientation: "vertical" Button: text: 'Scan For Networks' on_release: root.manager.current = 'networks' root.scan() Button: text: 'Back to menu' on_release: root.manager.transition.direction = 'right' root.manager.current = 'menu' <NetworksScreen>: BoxLayout: orientation: "vertical" """) ssids = [] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Declare both screens class MenuScreen(Screen): def __init__(self, **kwargs): super(Screen, self).__init__(**kwargs) vLayout2 = BoxLayout(orientation='vertical') self.add_widget(vLayout2) settings_button = Button(text='Settings') vLayout2.add_widget(settings_button) settings_button.bind(on_press=self.forwardFunction) test_button = Button(text='Test') vLayout2.add_widget(test_button) test_button.bind(on_press=self.forwardFunction2) quit_button = Button(text='Quit') vLayout2.add_widget(quit_button) quit_button.bind(on_press=self.closeButton) def closeButton(self, placeholder): s.close() App.get_running_app().stop() def forwardFunction(self, next_screen): sm.transition.direction = 'left' sm.current = 'settings' def forwardFunction2(self, next_screen): sm.transition.direction = 'left' sm.current = 'testing' class TestScreen(Screen): def __init__(self, **kwargs): super(Screen, self).__init__(**kwargs) vLayout3 = BoxLayout(orientation='vertical') self.add_widget(vLayout3) test_button = Button(text='Send Message',pos = (100,25), size=(100, 25), size_hint=(.15, None)) self.add_widget(test_button) test_button.bind(on_press=self.sendData) back_button = Button(text='Back to Menu', size=(100, 25), size_hint=(.15, None)) vLayout3.add_widget(back_button) 
back_button.bind(on_press=self.backFunction) def sendData(self, placeholder): data = 'Test Worked' try: s.send(data.encode('utf-8')) except socket.error: print("An error has occurred... closing connection to server") finally: s.shutdown(socket.SHUT_RDWR) s.close() def backFunction(self, next_screen): sm.transition.direction = 'right' sm.current = 'menu' class NetworksScreen(Screen): #def settings_release(self): def __init__(self, **kwargs): super(Screen, self).__init__(**kwargs) def backFunction(self, next_screen): sm.transition.direction = 'right' sm.current = 'settings' def connectWifi(self, placeholder): #s = socket.socket() # Create a socket object host = socket.gethostname() # Get local machine name port = 12345 # Reserve a port for your service. try: s.connect((host, port)) print(s.recv(1024)) except socket.error: print("An error has occurred... closing connection to server") finally: #s.shutdown(socket.SHUT_RDWR) #s.close() def printButtons(self): y = 0 s2 = self.manager.get_screen('settings') vLayout = BoxLayout(orientation='vertical') self.add_widget(vLayout) while y < len(ssids) - 1: button = Button(text=ssids[y]) button.bind(on_press=self.connectWifi) vLayout.add_widget(button) y += 1 back_button = Button(text='Back to Settings') vLayout.add_widget(back_button) back_button.bind(on_press=self.backFunction) class SettingsScreen(Screen): def scan(self): results = subprocess.check_output(["netsh", "wlan", "show", "network"]) results = results.decode("ascii") # needed in python 3 results = results.replace("\r", "") ls = results.split("\n") ls = ls[4:] x = 0 y = 0 while x < len(ls): if x % 5 == 0: ssids.append(ls[x]) x += 1 while y < len(ssids)-1: y += 1 s2 = self.manager.get_screen('networks') s2.printButtons() # Create the screen manager sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(SettingsScreen(name='settings')) sm.add_widget(TestScreen(name='testing')) sm.add_widget(NetworksScreen(name='networks')) class TestApp(App): def 
build(self): return sm if __name__ == '__main__': TestApp().run()
normal
{ "blob_id": "237a647e7bf0b1c12abd78b1ef6e293e73232a6c", "index": 2217, "step-1": "from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nimport subprocess\nimport socket\nfrom kivy.uix.button import Button\nfrom kivy.uix.button import Label\nfrom kivy.uix.boxlayout import BoxLayout\n\nBuilder.load_string(\"\"\"\n<MenuScreen>:\n BoxLayout:\n orientation: \"vertical\"\n\n<SettingsScreen>:\n BoxLayout:\n orientation: \"vertical\"\n Button:\n text: 'Scan For Networks'\n on_release:\n root.manager.current = 'networks'\n root.scan()\n\n\n Button:\n text: 'Back to menu'\n on_release:\n root.manager.transition.direction = 'right'\n root.manager.current = 'menu'\n\n<NetworksScreen>:\n BoxLayout:\n orientation: \"vertical\"\n\"\"\")\n\nssids = []\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Declare both screens\nclass MenuScreen(Screen):\n\n def __init__(self, **kwargs):\n super(Screen, self).__init__(**kwargs)\n vLayout2 = BoxLayout(orientation='vertical')\n self.add_widget(vLayout2)\n\n settings_button = Button(text='Settings')\n vLayout2.add_widget(settings_button)\n settings_button.bind(on_press=self.forwardFunction)\n\n test_button = Button(text='Test')\n vLayout2.add_widget(test_button)\n test_button.bind(on_press=self.forwardFunction2)\n\n quit_button = Button(text='Quit')\n vLayout2.add_widget(quit_button)\n quit_button.bind(on_press=self.closeButton)\n\n def closeButton(self, placeholder):\n s.close()\n App.get_running_app().stop()\n\n def forwardFunction(self, next_screen):\n sm.transition.direction = 'left'\n sm.current = 'settings'\n\n def forwardFunction2(self, next_screen):\n sm.transition.direction = 'left'\n sm.current = 'testing'\n\n\n\nclass TestScreen(Screen):\n\n def __init__(self, **kwargs):\n super(Screen, self).__init__(**kwargs)\n\n vLayout3 = BoxLayout(orientation='vertical')\n self.add_widget(vLayout3)\n test_button = Button(text='Send Message',pos = (100,25), size=(100, 
25), size_hint=(.15, None))\n self.add_widget(test_button)\n test_button.bind(on_press=self.sendData)\n back_button = Button(text='Back to Menu', size=(100, 25), size_hint=(.15, None))\n vLayout3.add_widget(back_button)\n back_button.bind(on_press=self.backFunction)\n\n\n def sendData(self, placeholder):\n data = 'Test Worked'\n try:\n s.send(data.encode('utf-8'))\n except socket.error:\n print(\"An error has occurred... closing connection to server\")\n finally:\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n\n\n def backFunction(self, next_screen):\n sm.transition.direction = 'right'\n sm.current = 'menu'\n\nclass NetworksScreen(Screen):\n #def settings_release(self):\n def __init__(self, **kwargs):\n super(Screen, self).__init__(**kwargs)\n\n def backFunction(self, next_screen):\n sm.transition.direction = 'right'\n sm.current = 'settings'\n\n def connectWifi(self, placeholder):\n #s = socket.socket() # Create a socket object\n host = socket.gethostname() # Get local machine name\n port = 12345 # Reserve a port for your service.\n\n try:\n s.connect((host, port))\n print(s.recv(1024))\n except socket.error:\n print(\"An error has occurred... 
closing connection to server\")\n finally:\n #s.shutdown(socket.SHUT_RDWR)\n #s.close()\n\n\n def printButtons(self):\n y = 0\n s2 = self.manager.get_screen('settings')\n vLayout = BoxLayout(orientation='vertical')\n self.add_widget(vLayout)\n while y < len(ssids) - 1:\n button = Button(text=ssids[y])\n button.bind(on_press=self.connectWifi)\n vLayout.add_widget(button)\n y += 1\n\n back_button = Button(text='Back to Settings')\n vLayout.add_widget(back_button)\n back_button.bind(on_press=self.backFunction)\n\nclass SettingsScreen(Screen):\n\n def scan(self):\n\n results = subprocess.check_output([\"netsh\", \"wlan\", \"show\", \"network\"])\n results = results.decode(\"ascii\") # needed in python 3\n results = results.replace(\"\\r\", \"\")\n ls = results.split(\"\\n\")\n ls = ls[4:]\n x = 0\n y = 0\n\n while x < len(ls):\n if x % 5 == 0:\n ssids.append(ls[x])\n x += 1\n\n while y < len(ssids)-1:\n y += 1\n\n s2 = self.manager.get_screen('networks')\n s2.printButtons()\n\n\n# Create the screen manager\nsm = ScreenManager()\nsm.add_widget(MenuScreen(name='menu'))\nsm.add_widget(SettingsScreen(name='settings'))\nsm.add_widget(TestScreen(name='testing'))\nsm.add_widget(NetworksScreen(name='networks'))\n\nclass TestApp(App):\n\n def build(self):\n return sm\n\nif __name__ == '__main__':\n TestApp().run()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# Generated by Django 2.2.6 on 2020-05-21 09:44 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('DHOPD', '0015_auto_20200515_0126'), ] operations = [ migrations.CreateModel( name='Patient_c', fields=[ ('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)), ('patient_fname', models.CharField(max_length=200)), ('patient_mname', models.CharField(max_length=200)), ('patient_lname', models.CharField(max_length=200)), ('patient_title', models.CharField(max_length=20)), ('patient_address', models.CharField(max_length=500)), ('patient_town', models.CharField(max_length=200)), ('patient_phone', models.CharField(max_length=15)), ('patient_services', models.CharField(max_length=500)), ('patient_status', models.CharField(max_length=2)), ('patient_cost', models.CharField(max_length=100)), ('patient_date', models.DateField(default=datetime.date.today)), ('patient_time', models.TimeField(auto_now_add=True)), ('patient_comment', models.CharField(max_length=200)), ], ), migrations.CreateModel( name='Receipt_c', fields=[ ('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)), ('receipt_patient', models.CharField(max_length=200)), ('receipt_cost', models.CharField(max_length=200)), ('receipt_time', models.TimeField(auto_now=True)), ('receipt_status', models.CharField(default='-1', max_length=10)), ], ), ]
normal
{ "blob_id": "52da8608e43b2d8dfe00f0956a1187fcf2e7b1ff", "index": 41, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('DHOPD', '0015_auto_20200515_0126')]\n operations = [migrations.CreateModel(name='Patient_c', fields=[(\n 'patient_id', models.AutoField(max_length=200, primary_key=True,\n serialize=False)), ('patient_fname', models.CharField(max_length=\n 200)), ('patient_mname', models.CharField(max_length=200)), (\n 'patient_lname', models.CharField(max_length=200)), (\n 'patient_title', models.CharField(max_length=20)), (\n 'patient_address', models.CharField(max_length=500)), (\n 'patient_town', models.CharField(max_length=200)), ('patient_phone',\n models.CharField(max_length=15)), ('patient_services', models.\n CharField(max_length=500)), ('patient_status', models.CharField(\n max_length=2)), ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)), (\n 'patient_time', models.TimeField(auto_now_add=True)), (\n 'patient_comment', models.CharField(max_length=200))]), migrations.\n CreateModel(name='Receipt_c', fields=[('receipt_id', models.\n AutoField(max_length=200, primary_key=True, serialize=False)), (\n 'receipt_patient', models.CharField(max_length=200)), (\n 'receipt_cost', models.CharField(max_length=200)), ('receipt_time',\n models.TimeField(auto_now=True)), ('receipt_status', models.\n CharField(default='-1', max_length=10))])]\n", "step-4": "import datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('DHOPD', '0015_auto_20200515_0126')]\n operations = [migrations.CreateModel(name='Patient_c', fields=[(\n 'patient_id', models.AutoField(max_length=200, primary_key=True,\n serialize=False)), ('patient_fname', models.CharField(max_length=\n 200)), ('patient_mname', 
models.CharField(max_length=200)), (\n 'patient_lname', models.CharField(max_length=200)), (\n 'patient_title', models.CharField(max_length=20)), (\n 'patient_address', models.CharField(max_length=500)), (\n 'patient_town', models.CharField(max_length=200)), ('patient_phone',\n models.CharField(max_length=15)), ('patient_services', models.\n CharField(max_length=500)), ('patient_status', models.CharField(\n max_length=2)), ('patient_cost', models.CharField(max_length=100)),\n ('patient_date', models.DateField(default=datetime.date.today)), (\n 'patient_time', models.TimeField(auto_now_add=True)), (\n 'patient_comment', models.CharField(max_length=200))]), migrations.\n CreateModel(name='Receipt_c', fields=[('receipt_id', models.\n AutoField(max_length=200, primary_key=True, serialize=False)), (\n 'receipt_patient', models.CharField(max_length=200)), (\n 'receipt_cost', models.CharField(max_length=200)), ('receipt_time',\n models.TimeField(auto_now=True)), ('receipt_status', models.\n CharField(default='-1', max_length=10))])]\n", "step-5": "# Generated by Django 2.2.6 on 2020-05-21 09:44\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('DHOPD', '0015_auto_20200515_0126'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Patient_c',\n fields=[\n ('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),\n ('patient_fname', models.CharField(max_length=200)),\n ('patient_mname', models.CharField(max_length=200)),\n ('patient_lname', models.CharField(max_length=200)),\n ('patient_title', models.CharField(max_length=20)),\n ('patient_address', models.CharField(max_length=500)),\n ('patient_town', models.CharField(max_length=200)),\n ('patient_phone', models.CharField(max_length=15)),\n ('patient_services', models.CharField(max_length=500)),\n ('patient_status', models.CharField(max_length=2)),\n ('patient_cost', models.CharField(max_length=100)),\n 
('patient_date', models.DateField(default=datetime.date.today)),\n ('patient_time', models.TimeField(auto_now_add=True)),\n ('patient_comment', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Receipt_c',\n fields=[\n ('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),\n ('receipt_patient', models.CharField(max_length=200)),\n ('receipt_cost', models.CharField(max_length=200)),\n ('receipt_time', models.TimeField(auto_now=True)),\n ('receipt_status', models.CharField(default='-1', max_length=10)),\n ],\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
class Queue(object): def __init__(self, val_list=None): self.stack_one = [] self.stack_two = [] if val_list: for item in val_list: self.stack_one.append(item) def push(self, val=None): if val: self.stack_one.append(val) def pop(self): for index in range(0, len(self.stack_one)): self.stack_two.append(self.stack_one.pop()) self.stack_two.pop() def main(): a = Queue() if __name__ == '__main__': main()
normal
{ "blob_id": "d4d8d800b81a50f2c520f0394412935738d1a8ee", "index": 2986, "step-1": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n <mask token>\n\n\n<mask token>\n", "step-2": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n\n def pop(self):\n for index in range(0, len(self.stack_one)):\n self.stack_two.append(self.stack_one.pop())\n self.stack_two.pop()\n\n\n<mask token>\n", "step-3": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n\n def pop(self):\n for index in range(0, len(self.stack_one)):\n self.stack_two.append(self.stack_one.pop())\n self.stack_two.pop()\n\n\ndef main():\n a = Queue()\n\n\n<mask token>\n", "step-4": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n\n def pop(self):\n for index in range(0, len(self.stack_one)):\n self.stack_two.append(self.stack_one.pop())\n self.stack_two.pop()\n\n\ndef main():\n a = Queue()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": null, "step-ids": [ 3, 4, 5, 6 ] }
[ 3, 4, 5, 6 ]
print('SYL_2整型数组_12 合并排序数组')
normal
{ "blob_id": "571636be9d213d19bddfd1d04688bc0955c9eae5", "index": 4427, "step-1": "<mask token>\n", "step-2": "print('SYL_2整型数组_12 合并排序数组')\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from setuptools import Command class decl_cmd1(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): pass class decl_cmd2(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): pass
normal
{ "blob_id": "70b8efa844395592131382d1d1e2c39150804f99", "index": 4111, "step-1": "<mask token>\n\n\nclass decl_cmd1(Command):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n", "step-2": "<mask token>\n\n\nclass decl_cmd1(Command):\n <mask token>\n <mask token>\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n", "step-3": "<mask token>\n\n\nclass decl_cmd1(Command):\n <mask token>\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n", "step-4": "<mask token>\n\n\nclass decl_cmd1(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n", "step-5": "from setuptools import Command\n\n\nclass decl_cmd1(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass decl_cmd2(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n", "step-ids": [ 6, 8, 9, 10, 11 ] }
[ 6, 8, 9, 10, 11 ]
# socket_address_packing.py import binascii import socket import struct import sys for string_address in ['192.168.1.1', '127.0.0.1']: packed = socket.inet_aton(string_address) print('Originale :', string_address) print('Impacchettato:', binascii.hexlify(packed)) print('Spacchettato :', socket.inet_ntoa(packed)) print()
normal
{ "blob_id": "01626772b0f47987157e9f92ba2ce66a0ec2dcb4", "index": 4379, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor string_address in ['192.168.1.1', '127.0.0.1']:\n packed = socket.inet_aton(string_address)\n print('Originale :', string_address)\n print('Impacchettato:', binascii.hexlify(packed))\n print('Spacchettato :', socket.inet_ntoa(packed))\n print()\n", "step-3": "import binascii\nimport socket\nimport struct\nimport sys\nfor string_address in ['192.168.1.1', '127.0.0.1']:\n packed = socket.inet_aton(string_address)\n print('Originale :', string_address)\n print('Impacchettato:', binascii.hexlify(packed))\n print('Spacchettato :', socket.inet_ntoa(packed))\n print()\n", "step-4": "# socket_address_packing.py\n\nimport binascii\nimport socket\nimport struct\nimport sys\n\nfor string_address in ['192.168.1.1', '127.0.0.1']:\n packed = socket.inet_aton(string_address)\n print('Originale :', string_address)\n print('Impacchettato:', binascii.hexlify(packed))\n print('Spacchettato :', socket.inet_ntoa(packed))\n print()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python3 """ Calculates the maximization step in the EM algorithm for a GMM """ import numpy as np def maximization(X, g): """ Returns: pi, m, S, or None, None, None on failure """ if type(X) is not np.ndarray or len(X.shape) != 2: return None, None, None if type(g) is not np.ndarray or len(g.shape) != 2: return None, None, None n, d = X.shape if n != g.shape[1]: return None, None, None k = g.shape[0] # sum of gi equal to 1 probs = np.sum(g, axis=0) validation = np.ones((n,)) if not np.isclose(probs, validation).all(): return None, None, None pi = np.zeros((k,)) m = np.zeros((k, d)) S = np.zeros((k, d, d)) for i in range(k): pi[i] = np.sum(g[i]) / n m[i] = np.matmul(g[i], X) / np.sum(g[i]) S[i] = np.matmul(g[i] * (X - m[i]).T, X - m[i]) / np.sum(g[i]) return pi, m, S
normal
{ "blob_id": "a55daebd85002640db5e08c2cf6d3e937b883f01", "index": 1611, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef maximization(X, g):\n \"\"\"\n Returns: pi, m, S, or None, None, None on failure\n \"\"\"\n if type(X) is not np.ndarray or len(X.shape) != 2:\n return None, None, None\n if type(g) is not np.ndarray or len(g.shape) != 2:\n return None, None, None\n n, d = X.shape\n if n != g.shape[1]:\n return None, None, None\n k = g.shape[0]\n probs = np.sum(g, axis=0)\n validation = np.ones((n,))\n if not np.isclose(probs, validation).all():\n return None, None, None\n pi = np.zeros((k,))\n m = np.zeros((k, d))\n S = np.zeros((k, d, d))\n for i in range(k):\n pi[i] = np.sum(g[i]) / n\n m[i] = np.matmul(g[i], X) / np.sum(g[i])\n S[i] = np.matmul(g[i] * (X - m[i]).T, X - m[i]) / np.sum(g[i])\n return pi, m, S\n", "step-3": "<mask token>\nimport numpy as np\n\n\ndef maximization(X, g):\n \"\"\"\n Returns: pi, m, S, or None, None, None on failure\n \"\"\"\n if type(X) is not np.ndarray or len(X.shape) != 2:\n return None, None, None\n if type(g) is not np.ndarray or len(g.shape) != 2:\n return None, None, None\n n, d = X.shape\n if n != g.shape[1]:\n return None, None, None\n k = g.shape[0]\n probs = np.sum(g, axis=0)\n validation = np.ones((n,))\n if not np.isclose(probs, validation).all():\n return None, None, None\n pi = np.zeros((k,))\n m = np.zeros((k, d))\n S = np.zeros((k, d, d))\n for i in range(k):\n pi[i] = np.sum(g[i]) / n\n m[i] = np.matmul(g[i], X) / np.sum(g[i])\n S[i] = np.matmul(g[i] * (X - m[i]).T, X - m[i]) / np.sum(g[i])\n return pi, m, S\n", "step-4": "#!/usr/bin/env python3\n\"\"\"\nCalculates the maximization step in the EM algorithm for a GMM\n\"\"\"\n\n\nimport numpy as np\n\n\ndef maximization(X, g):\n \"\"\"\n Returns: pi, m, S, or None, None, None on failure\n \"\"\"\n if type(X) is not np.ndarray or len(X.shape) != 2:\n return None, None, None\n\n if type(g) is not np.ndarray or len(g.shape) != 2:\n return None, None, 
None\n\n n, d = X.shape\n\n if n != g.shape[1]:\n return None, None, None\n\n k = g.shape[0]\n\n # sum of gi equal to 1\n probs = np.sum(g, axis=0)\n validation = np.ones((n,))\n if not np.isclose(probs, validation).all():\n return None, None, None\n\n pi = np.zeros((k,))\n m = np.zeros((k, d))\n S = np.zeros((k, d, d))\n\n for i in range(k):\n pi[i] = np.sum(g[i]) / n\n m[i] = np.matmul(g[i], X) / np.sum(g[i])\n S[i] = np.matmul(g[i] * (X - m[i]).T, X - m[i]) / np.sum(g[i])\n return pi, m, S\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
""" Looks up values in createresistorvaluesdbm.py. Outputs string value ( cmd ). """ import dbm # Open a DB. The c option opens in read/write mode and creates the file if needed. db = dbm.open( 'resistorvalues', 'c' ) with open( "dummyoutput.txt", "r" ) as file_object: #print (file_object.readline(6)) data = file_object.readlines() # Go through serial string line by line for line in data: # parse on semi-colon words = line.split( ";" ) #print (line.rsplit(";")) # Ignore position information and pull out resistor values # Note every fourth item to compensate for word pairs for i in range( 1, len( words ), 4 ): # print(words[i]) # the get method has 2 vlues lookup, and what to return is no match in this case is `0` if db.get( words[ i ], 0 ) != 0: # Direction, i.e. "f" cmd1 = db.get( words[ i ] ) # Value, i.e. "10" cmd2 = db.get( words[ i + 2 ] ) # Formatting space space = b( ' ' ) cmd = cmd1 + space + cmd2 #print (cmd.decode('ascii')) print ( cmd )
normal
{ "blob_id": "69eb62ba47a63cf007334c777709b0513d75f396", "index": 1504, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n", "step-3": "<mask token>\ndb = dbm.open('resistorvalues', 'c')\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n", "step-4": "<mask token>\nimport dbm\ndb = dbm.open('resistorvalues', 'c')\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n", "step-5": "\"\"\"\r\n Looks up values in createresistorvaluesdbm.py.\r\n Outputs string value ( cmd ).\r\n\"\"\"\r\n\r\nimport dbm\r\n\r\n# Open a DB. 
The c option opens in read/write mode and creates the file if needed.\r\ndb = dbm.open( 'resistorvalues', 'c' )\r\n\r\n\r\nwith open( \"dummyoutput.txt\", \"r\" ) as file_object:\r\n#print (file_object.readline(6))\r\n data = file_object.readlines()\r\n # Go through serial string line by line\r\n for line in data:\r\n # parse on semi-colon\r\n words = line.split( \";\" )\r\n #print (line.rsplit(\";\"))\r\n # Ignore position information and pull out resistor values\r\n # Note every fourth item to compensate for word pairs\r\n for i in range( 1, len( words ), 4 ):\r\n # print(words[i])\r\n # the get method has 2 vlues lookup, and what to return is no match in this case is `0`\r\n if db.get( words[ i ], 0 ) != 0:\r\n # Direction, i.e. \"f\"\r\n cmd1 = db.get( words[ i ] )\r\n # Value, i.e. \"10\"\r\n cmd2 = db.get( words[ i + 2 ] )\r\n # Formatting space\r\n space = b( ' ' )\r\n cmd = cmd1 + space + cmd2\r\n #print (cmd.decode('ascii'))\r\n print ( cmd )\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/bin/python import sys import notify2 import subprocess from time import sleep def notification(message: str): """ Display notification to the desktop Task: 1. show() -> it will generate a complete new pop 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one. Usage : python <filename.py> typeObj:str value:int objective:str typeObj: RAM/SWAP/NORMAL value: current usage of RAM or SWAP (for NORMAL, the value = 0) objective: show/update """ # initialize the notification notify2.init("notifywhenLOAD") notifyObj = notify2.Notification("Emergency Alert!", message) notifyObj.set_timeout(12000) return notifyObj def main(): a = notification(f"{sys.argv[1]} exceeds {sys.argv[2]}") if sys.argv[1] in ["RAM", "SWAP"] and sys.argv[3] == "update": a.update(f"{sys.argv[1]} Alert!! Warning for death") # a.update('river') a.set_urgency(2) a.show() elif sys.argv[1] in ["RAM", "SWAP"] and sys.argv[3] == "show": a.set_timeout(10000) a.set_urgency(1) a.show() elif sys.argv[1] == "NORMAL": a.update("ChiLLax!!! Nothing to worry about") a.set_urgency(0) a.show() main()
normal
{ "blob_id": "8a7904881d936a3cb421ed5550856b600894fcee", "index": 5397, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n notify2.init('notifywhenLOAD')\n notifyObj = notify2.Notification('Emergency Alert!', message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f'{sys.argv[1]} exceeds {sys.argv[2]}')\n if sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'update':\n a.update(f'{sys.argv[1]} Alert!! Warning for death')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'show':\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == 'NORMAL':\n a.update('ChiLLax!!! Nothing to worry about')\n a.set_urgency(0)\n a.show()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n notify2.init('notifywhenLOAD')\n notifyObj = notify2.Notification('Emergency Alert!', message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f'{sys.argv[1]} exceeds {sys.argv[2]}')\n if sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'update':\n a.update(f'{sys.argv[1]} Alert!! 
Warning for death')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'show':\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == 'NORMAL':\n a.update('ChiLLax!!! Nothing to worry about')\n a.set_urgency(0)\n a.show()\n\n\nmain()\n", "step-4": "import sys\nimport notify2\nimport subprocess\nfrom time import sleep\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n notify2.init('notifywhenLOAD')\n notifyObj = notify2.Notification('Emergency Alert!', message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f'{sys.argv[1]} exceeds {sys.argv[2]}')\n if sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'update':\n a.update(f'{sys.argv[1]} Alert!! Warning for death')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'show':\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == 'NORMAL':\n a.update('ChiLLax!!! Nothing to worry about')\n a.set_urgency(0)\n a.show()\n\n\nmain()\n", "step-5": "#!/bin/python\nimport sys\nimport notify2\nimport subprocess\nfrom time import sleep\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. 
update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n # initialize the notification\n notify2.init(\"notifywhenLOAD\")\n notifyObj = notify2.Notification(\"Emergency Alert!\", message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f\"{sys.argv[1]} exceeds {sys.argv[2]}\")\n if sys.argv[1] in [\"RAM\", \"SWAP\"] and sys.argv[3] == \"update\":\n a.update(f\"{sys.argv[1]} Alert!! Warning for death\")\n # a.update('river')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in [\"RAM\", \"SWAP\"] and sys.argv[3] == \"show\":\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == \"NORMAL\":\n a.update(\"ChiLLax!!! Nothing to worry about\")\n a.set_urgency(0)\n a.show()\n\n\nmain()\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- """Module for mimic explainer and explainable surrogate models.""" from .mimic_explainer import MimicExplainer __all__ = ["MimicExplainer"]
normal
{ "blob_id": "0b8cb522c531ac84d363b569a3ea4bfe47f61993", "index": 5390, "step-1": "<mask token>\n", "step-2": "<mask token>\n__all__ = ['MimicExplainer']\n", "step-3": "<mask token>\nfrom .mimic_explainer import MimicExplainer\n__all__ = ['MimicExplainer']\n", "step-4": "# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\n\"\"\"Module for mimic explainer and explainable surrogate models.\"\"\"\nfrom .mimic_explainer import MimicExplainer\n\n__all__ = [\"MimicExplainer\"]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import os import redis class Carteiro(): if os.environ.get("REDIS_URL") != None: redis_pool = redis.ConnectionPool.from_url(os.environ.get("REDIS_URL")) else: redis_pool = '' def __init__(self, id, pacote): if os.environ.get("REDIS_URL") != None: self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool) else: self.redis_bd = redis.Redis() self.user_id = str(id) self.pacote = bytes(str(pacote), 'ascii') self.user_dict = self.redis_bd.hgetall(self.user_id) def guardar_status_encomenda(self, status): if self.redis_bd.exists(self.user_id): self.user_dict[self.pacote] = status self.redis_bd.hmset(self.user_id, self.user_dict) else: novo_user_dict = {self.pacote: status} self.redis_bd.hmset(self.user_id, novo_user_dict) def ler_carta(self): carta = self.user_dict.get(self.pacote) carta = carta.decode(encoding='UTF-8') return carta def roubar_pacote(self): if self.pacote in self.user_dict: if len(self.user_dict) == 1: self.redis_bd.delete(self.user_id) else: self.redis_bd.hdel(self.user_id, self.pacote) del self.user_dict[self.pacote] else: raise ValueError('codigo nao existente na base de dados') def checar_existencia_pacote(self): return self.user_dict.get(self.pacote)
normal
{ "blob_id": "dd95d14f35b6a92b3363d99a616678da18733a61", "index": 7839, "step-1": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n <mask token>\n <mask token>\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n", "step-2": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n <mask token>\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n 
raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n", "step-3": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n\n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n", "step-4": "import os\nimport redis\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if 
self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n\n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n", "step-5": "import os\nimport redis\n\nclass Carteiro():\n\n if os.environ.get(\"REDIS_URL\") != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get(\"REDIS_URL\"))\n else:\n redis_pool = ''\n \n def __init__(self, id, pacote):\n if os.environ.get(\"REDIS_URL\") != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n \n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n \n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return 
self.user_dict.get(self.pacote)", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
from django.shortcuts import render, get_object_or_404 from django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView from accounts.models import Employee from leave.models import ApplyLeave from departments.models import Department, Position from django.contrib.auth.models import User from hrm.models import Performance from django.urls import reverse_lazy, reverse from hrm.forms import PerformanceForm from django.http import HttpResponseRedirect from django.contrib.messages.views import SuccessMessageMixin from django.contrib import messages from helpers.help import check_user_login # Create your views here. def index(request): if not request.session.get('username'): return HttpResponseRedirect(reverse("accounts:login")) applied_leaves = ApplyLeave.objects.count() employees = Employee.objects.count() positions = Position.objects.count() departments = Department.objects.count() user = User.objects.get(username = request.session['username']) employee = Employee.objects.get(user = user.id) return render(request, "hrm/dashboard.html", {'employees': employees, 'positions': positions, 'departments': departments, 'applied_leaves': applied_leaves, "employee": employee, "user":user}) ''' Perfomance Control ''' class CreatePerformanceView(SuccessMessageMixin, CreateView): model = Performance fields = ('employee', 'start_date', 'finish_date', 'objective') success_message = "Successfully! Created employee and appraisal..." 
template_name = "hrm/performance/performance_form.html" def get_context_data(self, **kwargs): context = super(CreatePerformanceView, self).get_context_data(**kwargs) context['employee'] = Employee.objects.get(user = self.request.user.id) return context class ListPerformanceView(ListView): model = Performance context_object_name = "performances" template_name = "hrm/performance/performance_list.html" def get_context_data(self, **kwargs): context = super(ListPerformanceView, self).get_context_data(**kwargs) context['employee'] = Employee.objects.get(user = self.request.user.id) return context class UpdatePerformanceView(SuccessMessageMixin, UpdateView): model = Performance fields = ('employee', 'start_date', 'finish_date', 'objective') success_message = "Successfully! Updated an appraisal" context_object_name = "performance" template_name = "hrm/performance/performance_form.html" def get_context_data(self, **kwargs): context = super(UpdatePerformanceView, self).get_context_data(**kwargs) context['employee'] = Employee.objects.get(user = self.request.user.id) return context class DetailPerformanceView(DetailView): model = Performance context_object_name = "performance" template_name = "hrm/performance/performance_details.html" def get_context_data(self, **kwargs): context = super(DetailPerformanceView, self).get_context_data(**kwargs) context['employee'] = Employee.objects.get(user = self.request.user.id) return context class DeletePerformanceView(SuccessMessageMixin, DeleteView): model = Performance success_message = "Successfully! Deleted an appraisal." 
success_url = reverse_lazy("hrm:perfom_list") template_name = "hrm/performance/performance_delete.html" def get_context_data(self, **kwargs): context = super(DeletePerformanceView, self).get_context_data(**kwargs) context['employee'] = Employee.objects.get(user = self.request.user.id) return context ''' Showing an employees perfomance control ''' def show_employee_perfomance_control(request): check_user_login(request) employee = Employee.objects.get(user= User.objects.get(username = request.session['username']).id) perform = Performance.objects.filter(employee = employee.id) print(perform) if perform is None: return HttpResponseRedirect(reverse("hrm:hrm_index")) return render(request, "hrm/performance/employee_performance.html", {'employee': employee, 'performances': perform}) ''' Employee Provide Notes for his perfomance ''' def perfomance_notes(request, pk): form = PerformanceForm(request.POST or None,instance = get_object_or_404(Performance, pk=pk)) employee = Employee.objects.get(user= User.objects.get(username = request.session['username']).id) if request.method == "POST": if form.is_valid(): form.save() messages.success(request, "Successfully! Added notes on what you have done.") return HttpResponseRedirect(reverse('hrm:perfom_employee')) return render(request, "hrm/performance/performance_notes.html", {'form': form, 'employee': employee}) def appraisal(request, pk): perform = Performance.objects.get(id = pk) perform.status = 1 perform.save() messages.success(request, "Successfully! Appraised employee work....") return HttpResponseRedirect(reverse('hrm:perfom_list'))
normal
{ "blob_id": "7c6ac2837751703ac4582ee81c29ccf67b8277bc", "index": 1632, "step-1": "<mask token>\n\n\nclass UpdatePerformanceView(SuccessMessageMixin, UpdateView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DetailPerformanceView(DetailView):\n model = Performance\n context_object_name = 'performance'\n template_name = 'hrm/performance/performance_details.html'\n\n def get_context_data(self, **kwargs):\n context = super(DetailPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass DeletePerformanceView(SuccessMessageMixin, DeleteView):\n model = Performance\n success_message = 'Successfully! Deleted an appraisal.'\n success_url = reverse_lazy('hrm:perfom_list')\n template_name = 'hrm/performance/performance_delete.html'\n\n def get_context_data(self, **kwargs):\n context = super(DeletePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ListPerformanceView(ListView):\n model = Performance\n context_object_name = 'performances'\n template_name = 'hrm/performance/performance_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(ListPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass UpdatePerformanceView(SuccessMessageMixin, UpdateView):\n model = Performance\n fields = 'employee', 'start_date', 'finish_date', 'objective'\n success_message = 'Successfully! 
Updated an appraisal'\n context_object_name = 'performance'\n template_name = 'hrm/performance/performance_form.html'\n\n def get_context_data(self, **kwargs):\n context = super(UpdatePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass DetailPerformanceView(DetailView):\n model = Performance\n context_object_name = 'performance'\n template_name = 'hrm/performance/performance_details.html'\n\n def get_context_data(self, **kwargs):\n context = super(DetailPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass DeletePerformanceView(SuccessMessageMixin, DeleteView):\n model = Performance\n success_message = 'Successfully! Deleted an appraisal.'\n success_url = reverse_lazy('hrm:perfom_list')\n template_name = 'hrm/performance/performance_delete.html'\n\n def get_context_data(self, **kwargs):\n context = super(DeletePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef index(request):\n if not request.session.get('username'):\n return HttpResponseRedirect(reverse('accounts:login'))\n applied_leaves = ApplyLeave.objects.count()\n employees = Employee.objects.count()\n positions = Position.objects.count()\n departments = Department.objects.count()\n user = User.objects.get(username=request.session['username'])\n employee = Employee.objects.get(user=user.id)\n return render(request, 'hrm/dashboard.html', {'employees': employees,\n 'positions': positions, 'departments': departments,\n 'applied_leaves': applied_leaves, 'employee': employee, 'user': user})\n\n\n<mask token>\n\n\nclass CreatePerformanceView(SuccessMessageMixin, CreateView):\n model = Performance\n fields = 'employee', 'start_date', 'finish_date', 'objective'\n success_message = 
'Successfully! Created employee and appraisal...'\n template_name = 'hrm/performance/performance_form.html'\n\n def get_context_data(self, **kwargs):\n context = super(CreatePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass ListPerformanceView(ListView):\n model = Performance\n context_object_name = 'performances'\n template_name = 'hrm/performance/performance_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(ListPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass UpdatePerformanceView(SuccessMessageMixin, UpdateView):\n model = Performance\n fields = 'employee', 'start_date', 'finish_date', 'objective'\n success_message = 'Successfully! Updated an appraisal'\n context_object_name = 'performance'\n template_name = 'hrm/performance/performance_form.html'\n\n def get_context_data(self, **kwargs):\n context = super(UpdatePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass DetailPerformanceView(DetailView):\n model = Performance\n context_object_name = 'performance'\n template_name = 'hrm/performance/performance_details.html'\n\n def get_context_data(self, **kwargs):\n context = super(DetailPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass DeletePerformanceView(SuccessMessageMixin, DeleteView):\n model = Performance\n success_message = 'Successfully! 
Deleted an appraisal.'\n success_url = reverse_lazy('hrm:perfom_list')\n template_name = 'hrm/performance/performance_delete.html'\n\n def get_context_data(self, **kwargs):\n context = super(DeletePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\n<mask token>\n\n\ndef show_employee_perfomance_control(request):\n check_user_login(request)\n employee = Employee.objects.get(user=User.objects.get(username=request.\n session['username']).id)\n perform = Performance.objects.filter(employee=employee.id)\n print(perform)\n if perform is None:\n return HttpResponseRedirect(reverse('hrm:hrm_index'))\n return render(request, 'hrm/performance/employee_performance.html', {\n 'employee': employee, 'performances': perform})\n\n\n<mask token>\n", "step-4": "from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView\nfrom accounts.models import Employee\nfrom leave.models import ApplyLeave\nfrom departments.models import Department, Position\nfrom django.contrib.auth.models import User\nfrom hrm.models import Performance\nfrom django.urls import reverse_lazy, reverse\nfrom hrm.forms import PerformanceForm\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib import messages\nfrom helpers.help import check_user_login\n\n\ndef index(request):\n if not request.session.get('username'):\n return HttpResponseRedirect(reverse('accounts:login'))\n applied_leaves = ApplyLeave.objects.count()\n employees = Employee.objects.count()\n positions = Position.objects.count()\n departments = Department.objects.count()\n user = User.objects.get(username=request.session['username'])\n employee = Employee.objects.get(user=user.id)\n return render(request, 'hrm/dashboard.html', {'employees': employees,\n 'positions': positions, 'departments': 
departments,\n 'applied_leaves': applied_leaves, 'employee': employee, 'user': user})\n\n\n<mask token>\n\n\nclass CreatePerformanceView(SuccessMessageMixin, CreateView):\n model = Performance\n fields = 'employee', 'start_date', 'finish_date', 'objective'\n success_message = 'Successfully! Created employee and appraisal...'\n template_name = 'hrm/performance/performance_form.html'\n\n def get_context_data(self, **kwargs):\n context = super(CreatePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass ListPerformanceView(ListView):\n model = Performance\n context_object_name = 'performances'\n template_name = 'hrm/performance/performance_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(ListPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass UpdatePerformanceView(SuccessMessageMixin, UpdateView):\n model = Performance\n fields = 'employee', 'start_date', 'finish_date', 'objective'\n success_message = 'Successfully! Updated an appraisal'\n context_object_name = 'performance'\n template_name = 'hrm/performance/performance_form.html'\n\n def get_context_data(self, **kwargs):\n context = super(UpdatePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass DetailPerformanceView(DetailView):\n model = Performance\n context_object_name = 'performance'\n template_name = 'hrm/performance/performance_details.html'\n\n def get_context_data(self, **kwargs):\n context = super(DetailPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\nclass DeletePerformanceView(SuccessMessageMixin, DeleteView):\n model = Performance\n success_message = 'Successfully! 
Deleted an appraisal.'\n success_url = reverse_lazy('hrm:perfom_list')\n template_name = 'hrm/performance/performance_delete.html'\n\n def get_context_data(self, **kwargs):\n context = super(DeletePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user=self.request.user.id)\n return context\n\n\n<mask token>\n\n\ndef show_employee_perfomance_control(request):\n check_user_login(request)\n employee = Employee.objects.get(user=User.objects.get(username=request.\n session['username']).id)\n perform = Performance.objects.filter(employee=employee.id)\n print(perform)\n if perform is None:\n return HttpResponseRedirect(reverse('hrm:hrm_index'))\n return render(request, 'hrm/performance/employee_performance.html', {\n 'employee': employee, 'performances': perform})\n\n\n<mask token>\n\n\ndef perfomance_notes(request, pk):\n form = PerformanceForm(request.POST or None, instance=get_object_or_404\n (Performance, pk=pk))\n employee = Employee.objects.get(user=User.objects.get(username=request.\n session['username']).id)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n messages.success(request,\n 'Successfully! Added notes on what you have done.')\n return HttpResponseRedirect(reverse('hrm:perfom_employee'))\n return render(request, 'hrm/performance/performance_notes.html', {\n 'form': form, 'employee': employee})\n\n\ndef appraisal(request, pk):\n perform = Performance.objects.get(id=pk)\n perform.status = 1\n perform.save()\n messages.success(request, 'Successfully! 
Appraised employee work....')\n return HttpResponseRedirect(reverse('hrm:perfom_list'))\n", "step-5": "from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView\nfrom accounts.models import Employee\nfrom leave.models import ApplyLeave \nfrom departments.models import Department, Position \nfrom django.contrib.auth.models import User\nfrom hrm.models import Performance \nfrom django.urls import reverse_lazy, reverse\nfrom hrm.forms import PerformanceForm\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib import messages\nfrom helpers.help import check_user_login\n# Create your views here.\n\ndef index(request):\n if not request.session.get('username'):\n return HttpResponseRedirect(reverse(\"accounts:login\"))\n\n applied_leaves = ApplyLeave.objects.count()\n employees = Employee.objects.count()\n positions = Position.objects.count()\n departments = Department.objects.count()\n user = User.objects.get(username = request.session['username'])\n employee = Employee.objects.get(user = user.id)\n\n return render(request, \"hrm/dashboard.html\", \n {'employees': employees, 'positions': positions, 'departments': departments,\n 'applied_leaves': applied_leaves, \"employee\": employee, \"user\":user})\n\n\n'''\nPerfomance Control \n'''\n\nclass CreatePerformanceView(SuccessMessageMixin, CreateView):\n model = Performance \n fields = ('employee', 'start_date', 'finish_date', 'objective')\n success_message = \"Successfully! 
Created employee and appraisal...\"\n template_name = \"hrm/performance/performance_form.html\"\n def get_context_data(self, **kwargs):\n context = super(CreatePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user = self.request.user.id)\n return context \n\n\nclass ListPerformanceView(ListView):\n model = Performance \n context_object_name = \"performances\"\n template_name = \"hrm/performance/performance_list.html\"\n def get_context_data(self, **kwargs):\n context = super(ListPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user = self.request.user.id)\n return context \n\n\nclass UpdatePerformanceView(SuccessMessageMixin, UpdateView):\n model = Performance\n fields = ('employee', 'start_date', 'finish_date', 'objective')\n success_message = \"Successfully! Updated an appraisal\"\n context_object_name = \"performance\"\n template_name = \"hrm/performance/performance_form.html\"\n def get_context_data(self, **kwargs):\n context = super(UpdatePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user = self.request.user.id)\n return context\n\n\nclass DetailPerformanceView(DetailView):\n model = Performance\n context_object_name = \"performance\"\n template_name = \"hrm/performance/performance_details.html\"\n def get_context_data(self, **kwargs):\n context = super(DetailPerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user = self.request.user.id)\n return context\n\n\nclass DeletePerformanceView(SuccessMessageMixin, DeleteView):\n model = Performance\n success_message = \"Successfully! 
Deleted an appraisal.\"\n success_url = reverse_lazy(\"hrm:perfom_list\")\n template_name = \"hrm/performance/performance_delete.html\"\n def get_context_data(self, **kwargs):\n context = super(DeletePerformanceView, self).get_context_data(**kwargs)\n context['employee'] = Employee.objects.get(user = self.request.user.id)\n return context\n\n'''\nShowing an employees perfomance control\n'''\ndef show_employee_perfomance_control(request):\n check_user_login(request)\n employee = Employee.objects.get(user= User.objects.get(username = request.session['username']).id)\n perform = Performance.objects.filter(employee = employee.id)\n print(perform)\n if perform is None:\n return HttpResponseRedirect(reverse(\"hrm:hrm_index\"))\n return render(request, \"hrm/performance/employee_performance.html\", {'employee': employee, 'performances': perform})\n\n\n'''\nEmployee Provide Notes for his perfomance\n'''\n\ndef perfomance_notes(request, pk):\n form = PerformanceForm(request.POST or None,instance = get_object_or_404(Performance, pk=pk))\n employee = Employee.objects.get(user= User.objects.get(username = request.session['username']).id)\n if request.method == \"POST\":\n if form.is_valid():\n form.save()\n messages.success(request, \"Successfully! Added notes on what you have done.\")\n return HttpResponseRedirect(reverse('hrm:perfom_employee'))\n return render(request, \"hrm/performance/performance_notes.html\", {'form': form, 'employee': employee})\n\ndef appraisal(request, pk):\n perform = Performance.objects.get(id = pk)\n perform.status = 1\n perform.save()\n messages.success(request, \"Successfully! Appraised employee work....\")\n return HttpResponseRedirect(reverse('hrm:perfom_list'))\n", "step-ids": [ 7, 12, 17, 20, 21 ] }
[ 7, 12, 17, 20, 21 ]
#-*- coding: utf-8 -*- # Copyright (C) 2011 by # Jordi Torrents <[email protected]> # Aric Hagberg <[email protected]> # All rights reserved. # BSD license. import itertools import networkx as nx __author__ = """\n""".join(['Jordi Torrents <[email protected]>', 'Aric Hagberg ([email protected])']) __all__ = ['clustering', 'average_clustering', 'latapy_clustering', 'robins_alexander_clustering'] # functions for computing clustering of pairs def cc_dot(nu, nv): return float(len(nu & nv)) / len(nu | nv) def cc_max(nu, nv): return float(len(nu & nv)) / max(len(nu), len(nv)) def cc_min(nu, nv): return float(len(nu & nv)) / min(len(nu), len(nv)) modes = {'dot': cc_dot, 'min': cc_min, 'max': cc_max} def latapy_clustering(G, nodes=None, mode='dot'): r"""Compute a bipartite clustering coefficient for nodes. The bipartie clustering coefficient is a measure of local density of connections defined as [1]_: .. math:: c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|} where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, and `c_{uv}` is the pairwise clustering coefficient between nodes `u` and `v`. The mode selects the function for `c_{uv}` which can be: `dot`: .. math:: c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|} `min`: .. math:: c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)} `max`: .. math:: c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)} Parameters ---------- G : graph A bipartite graph nodes : list or iterable (optional) Compute bipartite clustering for these nodes. The default is all nodes in G. mode : string The pariwise bipartite clustering method to be used in the computation. It must be "dot", "max", or "min". Returns ------- clustering : dictionary A dictionary keyed by node with the clustering coefficient value. 
Examples -------- >>> from networkx.algorithms import bipartite >>> G = nx.path_graph(4) # path graphs are bipartite >>> c = bipartite.clustering(G) >>> c[0] 0.5 >>> c = bipartite.clustering(G,mode='min') >>> c[0] 1.0 See Also -------- robins_alexander_clustering square_clustering average_clustering References ---------- .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). Basic notions for the analysis of large two-mode networks. Social Networks 30(1), 31--48. """ if not nx.algorithms.bipartite.is_bipartite(G): raise nx.NetworkXError("Graph is not bipartite") try: cc_func = modes[mode] except KeyError: raise nx.NetworkXError( "Mode for bipartite clustering must be: dot, min or max") if nodes is None: nodes = G ccs = {} for v in nodes: cc = 0.0 nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v]) for u in nbrs2: cc += cc_func(set(G[u]), set(G[v])) if cc > 0.0: # len(nbrs2)>0 cc /= len(nbrs2) ccs[v] = cc return ccs clustering = latapy_clustering def average_clustering(G, nodes=None, mode='dot'): r"""Compute the average bipartite clustering coefficient. A clustering coefficient for the whole graph is the average, .. math:: C = \frac{1}{n}\sum_{v \in G} c_v, where `n` is the number of nodes in `G`. Similar measures for the two bipartite sets can be defined [1]_ .. math:: C_X = \frac{1}{|X|}\sum_{v \in X} c_v, where `X` is a bipartite set of `G`. Parameters ---------- G : graph a bipartite graph nodes : list or iterable, optional A container of nodes to use in computing the average. The nodes should be either the entire graph (the default) or one of the bipartite sets. mode : string The pariwise bipartite clustering method. It must be "dot", "max", or "min" Returns ------- clustering : float The average bipartite clustering for the given set of nodes or the entire graph if no nodes are specified. 
Examples -------- >>> from networkx.algorithms import bipartite >>> G=nx.star_graph(3) # star graphs are bipartite >>> bipartite.average_clustering(G) 0.75 >>> X,Y=bipartite.sets(G) >>> bipartite.average_clustering(G,X) 0.0 >>> bipartite.average_clustering(G,Y) 1.0 See Also -------- clustering Notes ----- The container of nodes passed to this function must contain all of the nodes in one of the bipartite sets ("top" or "bottom") in order to compute the correct average bipartite clustering coefficients. See :mod:`bipartite documentation <networkx.algorithms.bipartite>` for further details on how bipartite graphs are handled in NetworkX. References ---------- .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). Basic notions for the analysis of large two-mode networks. Social Networks 30(1), 31--48. """ if nodes is None: nodes = G ccs = latapy_clustering(G, nodes=nodes, mode=mode) return float(sum(ccs[v] for v in nodes)) / len(nodes) def robins_alexander_clustering(G): r"""Compute the bipartite clustering of G. Robins and Alexander [1]_ defined bipartite clustering coefficient as four times the number of four cycles `C_4` divided by the number of three paths `L_3` in a bipartite graph: .. math:: CC_4 = \frac{4 * C_4}{L_3} Parameters ---------- G : graph a bipartite graph Returns ------- clustering : float The Robins and Alexander bipartite clustering for the input graph. Examples -------- >>> from networkx.algorithms import bipartite >>> G = nx.davis_southern_women_graph() >>> print(round(bipartite.robins_alexander_clustering(G), 3)) 0.468 See Also -------- latapy_clustering square_clustering References ---------- .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking directors: Network structure and distance in bipartite graphs. Computational & Mathematical Organization Theory 10(1), 69–94. """ if G.order() < 4 or G.size() < 3: return 0 L_3 = _threepaths(G) if L_3 == 0: return 0 C_4 = _four_cycles(G) return (4. 
* C_4) / L_3 def _four_cycles(G): cycles = 0 for v in G: for u, w in itertools.combinations(G[v], 2): cycles += len((set(G[u]) & set(G[w])) - set([v])) return cycles / 4 def _threepaths(G): paths = 0 for v in G: for u in G[v]: for w in set(G[u]) - set([v]): paths += len(set(G[w]) - set([v, u])) # Divide by two because we count each three path twice # one for each possible starting point return paths / 2
normal
{ "blob_id": "a21c132ba9f24ff2c695bf66cae074705025d6b1", "index": 8063, "step-1": "<mask token>\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\n<mask token>\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. \n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. 
[1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\n<mask token>\n\n\ndef latapy_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. 
math::\n\n c_u = \\\\frac{\\\\sum_{v \\\\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{|N(u) \\\\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". \n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. 
\n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError('Graph is not bipartite')\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n 'Mode for bipartite clustering must be: dot, min or max')\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0:\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\n<mask token>\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in (set(G[u]) - set([v])):\n paths += len(set(G[w]) - set([v, u]))\n return paths / 2\n", "step-3": "<mask token>\n__author__ = '\\n'.join(['Jordi Torrents <[email protected]>',\n 'Aric Hagberg ([email protected])'])\n__all__ = ['clustering', 'average_clustering', 'latapy_clustering',\n 'robins_alexander_clustering']\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\nmodes = {'dot': cc_dot, 'min': cc_min, 'max': cc_max}\n\n\ndef latapy_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. 
math::\n\n c_u = \\\\frac{\\\\sum_{v \\\\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{|N(u) \\\\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". \n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. 
\n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError('Graph is not bipartite')\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n 'Mode for bipartite clustering must be: dot, min or max')\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0:\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\nclustering = latapy_clustering\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in (set(G[u]) - set([v])):\n paths += len(set(G[w]) - set([v, u]))\n return paths / 2\n", "step-4": "import itertools\nimport networkx as nx\n__author__ = '\\n'.join(['Jordi Torrents <[email protected]>',\n 'Aric Hagberg ([email protected])'])\n__all__ = ['clustering', 'average_clustering', 'latapy_clustering',\n 'robins_alexander_clustering']\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\nmodes = {'dot': cc_dot, 'min': cc_min, 'max': cc_max}\n\n\ndef latapy_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. 
math::\n\n c_u = \\\\frac{\\\\sum_{v \\\\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{|N(u) \\\\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\\\frac{|N(u)\\\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". \n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. 
\n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError('Graph is not bipartite')\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n 'Mode for bipartite clustering must be: dot, min or max')\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0:\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\nclustering = latapy_clustering\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n \"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\\\frac{1}{n}\\\\sum_{v \\\\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\\\frac{1}{|X|}\\\\sum_{v \\\\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n \"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\\\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return 4.0 * C_4 / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in (set(G[u]) - set([v])):\n paths += len(set(G[w]) - set([v, u]))\n return paths / 2\n", "step-5": "#-*- coding: utf-8 -*-\n# Copyright (C) 2011 by\n# Jordi Torrents <[email protected]>\n# Aric Hagberg <[email protected]>\n# All rights reserved.\n# BSD license.\nimport itertools\nimport networkx as nx\n__author__ = \"\"\"\\n\"\"\".join(['Jordi Torrents <[email protected]>',\n 'Aric Hagberg ([email protected])'])\n__all__ = ['clustering',\n 'average_clustering',\n 'latapy_clustering',\n 'robins_alexander_clustering']\n\n# functions for computing clustering of pairs\n\n\ndef cc_dot(nu, nv):\n return float(len(nu & nv)) / len(nu | nv)\n\n\ndef cc_max(nu, nv):\n return float(len(nu & nv)) / max(len(nu), len(nv))\n\n\ndef cc_min(nu, nv):\n return float(len(nu & nv)) / min(len(nu), len(nv))\n\n\nmodes = {'dot': cc_dot,\n 'min': cc_min,\n 'max': cc_max}\n\n\ndef latapy_clustering(G, 
nodes=None, mode='dot'):\n r\"\"\"Compute a bipartite clustering coefficient for nodes.\n\n The bipartie clustering coefficient is a measure of local density\n of connections defined as [1]_:\n\n .. math::\n\n c_u = \\frac{\\sum_{v \\in N(N(u))} c_{uv} }{|N(N(u))|}\n\n where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, \n and `c_{uv}` is the pairwise clustering coefficient between nodes \n `u` and `v`.\n\n The mode selects the function for `c_{uv}` which can be:\n\n `dot`: \n\n .. math::\n\n c_{uv}=\\frac{|N(u)\\cap N(v)|}{|N(u) \\cup N(v)|}\n\n `min`: \n\n .. math::\n\n c_{uv}=\\frac{|N(u)\\cap N(v)|}{min(|N(u)|,|N(v)|)}\n\n `max`: \n\n .. math::\n\n c_{uv}=\\frac{|N(u)\\cap N(v)|}{max(|N(u)|,|N(v)|)}\n\n\n Parameters\n ----------\n G : graph\n A bipartite graph\n\n nodes : list or iterable (optional)\n Compute bipartite clustering for these nodes. The default \n is all nodes in G.\n\n mode : string\n The pariwise bipartite clustering method to be used in the computation.\n It must be \"dot\", \"max\", or \"min\". \n\n Returns\n -------\n clustering : dictionary\n A dictionary keyed by node with the clustering coefficient value.\n\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.path_graph(4) # path graphs are bipartite\n >>> c = bipartite.clustering(G) \n >>> c[0]\n 0.5\n >>> c = bipartite.clustering(G,mode='min') \n >>> c[0]\n 1.0\n\n See Also\n --------\n robins_alexander_clustering\n square_clustering\n average_clustering\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. 
\n Social Networks 30(1), 31--48.\n \"\"\"\n if not nx.algorithms.bipartite.is_bipartite(G):\n raise nx.NetworkXError(\"Graph is not bipartite\")\n\n try:\n cc_func = modes[mode]\n except KeyError:\n raise nx.NetworkXError(\n \"Mode for bipartite clustering must be: dot, min or max\")\n\n if nodes is None:\n nodes = G\n ccs = {}\n for v in nodes:\n cc = 0.0\n nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])\n for u in nbrs2:\n cc += cc_func(set(G[u]), set(G[v]))\n if cc > 0.0: # len(nbrs2)>0\n cc /= len(nbrs2)\n ccs[v] = cc\n return ccs\n\n\nclustering = latapy_clustering\n\n\ndef average_clustering(G, nodes=None, mode='dot'):\n r\"\"\"Compute the average bipartite clustering coefficient.\n\n A clustering coefficient for the whole graph is the average, \n\n .. math::\n\n C = \\frac{1}{n}\\sum_{v \\in G} c_v,\n\n where `n` is the number of nodes in `G`.\n\n Similar measures for the two bipartite sets can be defined [1]_\n\n .. math::\n\n C_X = \\frac{1}{|X|}\\sum_{v \\in X} c_v,\n\n where `X` is a bipartite set of `G`.\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n nodes : list or iterable, optional\n A container of nodes to use in computing the average. \n The nodes should be either the entire graph (the default) or one of the \n bipartite sets.\n\n mode : string\n The pariwise bipartite clustering method. 
\n It must be \"dot\", \"max\", or \"min\" \n\n Returns\n -------\n clustering : float\n The average bipartite clustering for the given set of nodes or the \n entire graph if no nodes are specified.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G=nx.star_graph(3) # star graphs are bipartite\n >>> bipartite.average_clustering(G) \n 0.75\n >>> X,Y=bipartite.sets(G)\n >>> bipartite.average_clustering(G,X) \n 0.0\n >>> bipartite.average_clustering(G,Y) \n 1.0\n\n See Also\n --------\n clustering\n\n Notes \n -----\n The container of nodes passed to this function must contain all of the nodes\n in one of the bipartite sets (\"top\" or \"bottom\") in order to compute \n the correct average bipartite clustering coefficients.\n See :mod:`bipartite documentation <networkx.algorithms.bipartite>`\n for further details on how bipartite graphs are handled in NetworkX.\n\n\n References\n ----------\n .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).\n Basic notions for the analysis of large two-mode networks. \n Social Networks 30(1), 31--48.\n \"\"\"\n if nodes is None:\n nodes = G\n ccs = latapy_clustering(G, nodes=nodes, mode=mode)\n return float(sum(ccs[v] for v in nodes)) / len(nodes)\n\n\ndef robins_alexander_clustering(G):\n r\"\"\"Compute the bipartite clustering of G.\n\n Robins and Alexander [1]_ defined bipartite clustering coefficient as\n four times the number of four cycles `C_4` divided by the number of\n three paths `L_3` in a bipartite graph:\n\n .. 
math::\n\n CC_4 = \\frac{4 * C_4}{L_3}\n\n Parameters\n ----------\n G : graph\n a bipartite graph\n\n Returns\n -------\n clustering : float\n The Robins and Alexander bipartite clustering for the input graph.\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.davis_southern_women_graph()\n >>> print(round(bipartite.robins_alexander_clustering(G), 3))\n 0.468\n\n See Also\n --------\n latapy_clustering\n square_clustering\n\n References\n ----------\n .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking \n directors: Network structure and distance in bipartite graphs. \n Computational & Mathematical Organization Theory 10(1), 69–94.\n\n \"\"\"\n if G.order() < 4 or G.size() < 3:\n return 0\n L_3 = _threepaths(G)\n if L_3 == 0:\n return 0\n C_4 = _four_cycles(G)\n return (4. * C_4) / L_3\n\n\ndef _four_cycles(G):\n cycles = 0\n for v in G:\n for u, w in itertools.combinations(G[v], 2):\n cycles += len((set(G[u]) & set(G[w])) - set([v]))\n return cycles / 4\n\n\ndef _threepaths(G):\n paths = 0\n for v in G:\n for u in G[v]:\n for w in set(G[u]) - set([v]):\n paths += len(set(G[w]) - set([v, u]))\n # Divide by two because we count each three path twice\n # one for each possible starting point\n return paths / 2\n", "step-ids": [ 4, 8, 9, 10, 11 ] }
[ 4, 8, 9, 10, 11 ]
import os.path as osp from evaluations.common import tiou from evaluations.util import load_file import generate_track_link def eval_ground_scores(gt_relations, pred_relations, tiou_threshold): """ :param gt_relations: :param pred_relations: :param tiou_threshold: :return: """ # pred_relations = sorted(pred_relations, key=lambda x: x['score'], reverse=True) relation_num = len(gt_relations) predict, predict_sub, predict_obj = 0, 0, 0 for relation, pred_trajs in pred_relations.items(): pred_sub = pred_trajs['sub'] pred_obj = pred_trajs['obj'] flag, flag_s, flag_o = False, False, False gt_trajs = gt_relations[relation] # print(relation) for gt_traj in gt_trajs: gt_sub = gt_traj['sub'] gt_obj = gt_traj['obj'] s_tiou = tiou(pred_sub, gt_sub) o_tiou = tiou(pred_obj, gt_obj) r_iou = min(s_tiou, o_tiou) if r_iou >= tiou_threshold: flag = True if s_tiou >= tiou_threshold: flag_s = True if o_tiou >= tiou_threshold: flag_o = True if flag: predict += 1 if flag_s: predict_sub += 1 if flag_o: predict_obj += 1 predict = predict / relation_num predict_sub = predict_sub /relation_num predict_obj = predict_obj /relation_num return predict, predict_sub, predict_obj, relation_num def evaluate(groundtruth, prediction, tiou_threshold=0.5): """ evaluate visual relation detection and visual relation tagging. 
""" video_num = len(groundtruth) print('Computing grounding accuracy over {} videos...'.format(video_num)) acc, acc_sub, acc_obj = 0.0, 0.0, 0.0 gt_rnum = 0 for qid, relation_gt in groundtruth.items(): if qid not in prediction: continue relation_pred = prediction[qid] if len(relation_pred) == 0: continue video_acc, video_acc_sub, video_acc_obj, relation_num = eval_ground_scores(relation_gt, relation_pred, tiou_threshold) acc += video_acc acc_sub += video_acc_sub acc_obj += video_acc_obj gt_rnum += relation_num acc /= video_num acc_sub /= video_num acc_obj /= video_num print("Acc_S\t Acc_O\t Acc_R") print('{:.2f}\t {:.2f}\t {:.2f}'.format(acc_sub*100, acc_obj*100, acc*100)) def main(): groundtruth_dir = 'dataset/vidvrd/' gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json') result_dir = 'results/' res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json') if not osp.exists(res_file): print('Generating ...') generate_track_link.main(res_file) grountruth = load_file(gt_file) prediction = load_file(res_file) evaluate(grountruth, prediction) if __name__ == "__main__": main()
normal
{ "blob_id": "f26e6164fc4c07fd3339171e316b3a1f7a4be669", "index": 2447, "step-1": "<mask token>\n\n\ndef eval_ground_scores(gt_relations, pred_relations, tiou_threshold):\n \"\"\"\n\n :param gt_relations:\n :param pred_relations:\n :param tiou_threshold:\n :return:\n \"\"\"\n relation_num = len(gt_relations)\n predict, predict_sub, predict_obj = 0, 0, 0\n for relation, pred_trajs in pred_relations.items():\n pred_sub = pred_trajs['sub']\n pred_obj = pred_trajs['obj']\n flag, flag_s, flag_o = False, False, False\n gt_trajs = gt_relations[relation]\n for gt_traj in gt_trajs:\n gt_sub = gt_traj['sub']\n gt_obj = gt_traj['obj']\n s_tiou = tiou(pred_sub, gt_sub)\n o_tiou = tiou(pred_obj, gt_obj)\n r_iou = min(s_tiou, o_tiou)\n if r_iou >= tiou_threshold:\n flag = True\n if s_tiou >= tiou_threshold:\n flag_s = True\n if o_tiou >= tiou_threshold:\n flag_o = True\n if flag:\n predict += 1\n if flag_s:\n predict_sub += 1\n if flag_o:\n predict_obj += 1\n predict = predict / relation_num\n predict_sub = predict_sub / relation_num\n predict_obj = predict_obj / relation_num\n return predict, predict_sub, predict_obj, relation_num\n\n\ndef evaluate(groundtruth, prediction, tiou_threshold=0.5):\n \"\"\" evaluate visual relation detection and visual \n relation tagging.\n \"\"\"\n video_num = len(groundtruth)\n print('Computing grounding accuracy over {} videos...'.format(video_num))\n acc, acc_sub, acc_obj = 0.0, 0.0, 0.0\n gt_rnum = 0\n for qid, relation_gt in groundtruth.items():\n if qid not in prediction:\n continue\n relation_pred = prediction[qid]\n if len(relation_pred) == 0:\n continue\n video_acc, video_acc_sub, video_acc_obj, relation_num = (\n eval_ground_scores(relation_gt, relation_pred, tiou_threshold))\n acc += video_acc\n acc_sub += video_acc_sub\n acc_obj += video_acc_obj\n gt_rnum += relation_num\n acc /= video_num\n acc_sub /= video_num\n acc_obj /= video_num\n print('Acc_S\\t Acc_O\\t Acc_R')\n print('{:.2f}\\t {:.2f}\\t {:.2f}'.format(acc_sub * 100, acc_obj 
* 100, \n acc * 100))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef eval_ground_scores(gt_relations, pred_relations, tiou_threshold):\n \"\"\"\n\n :param gt_relations:\n :param pred_relations:\n :param tiou_threshold:\n :return:\n \"\"\"\n relation_num = len(gt_relations)\n predict, predict_sub, predict_obj = 0, 0, 0\n for relation, pred_trajs in pred_relations.items():\n pred_sub = pred_trajs['sub']\n pred_obj = pred_trajs['obj']\n flag, flag_s, flag_o = False, False, False\n gt_trajs = gt_relations[relation]\n for gt_traj in gt_trajs:\n gt_sub = gt_traj['sub']\n gt_obj = gt_traj['obj']\n s_tiou = tiou(pred_sub, gt_sub)\n o_tiou = tiou(pred_obj, gt_obj)\n r_iou = min(s_tiou, o_tiou)\n if r_iou >= tiou_threshold:\n flag = True\n if s_tiou >= tiou_threshold:\n flag_s = True\n if o_tiou >= tiou_threshold:\n flag_o = True\n if flag:\n predict += 1\n if flag_s:\n predict_sub += 1\n if flag_o:\n predict_obj += 1\n predict = predict / relation_num\n predict_sub = predict_sub / relation_num\n predict_obj = predict_obj / relation_num\n return predict, predict_sub, predict_obj, relation_num\n\n\ndef evaluate(groundtruth, prediction, tiou_threshold=0.5):\n \"\"\" evaluate visual relation detection and visual \n relation tagging.\n \"\"\"\n video_num = len(groundtruth)\n print('Computing grounding accuracy over {} videos...'.format(video_num))\n acc, acc_sub, acc_obj = 0.0, 0.0, 0.0\n gt_rnum = 0\n for qid, relation_gt in groundtruth.items():\n if qid not in prediction:\n continue\n relation_pred = prediction[qid]\n if len(relation_pred) == 0:\n continue\n video_acc, video_acc_sub, video_acc_obj, relation_num = (\n eval_ground_scores(relation_gt, relation_pred, tiou_threshold))\n acc += video_acc\n acc_sub += video_acc_sub\n acc_obj += video_acc_obj\n gt_rnum += relation_num\n acc /= video_num\n acc_sub /= video_num\n acc_obj /= video_num\n print('Acc_S\\t Acc_O\\t Acc_R')\n print('{:.2f}\\t {:.2f}\\t {:.2f}'.format(acc_sub * 100, acc_obj * 100, \n acc * 
100))\n\n\ndef main():\n groundtruth_dir = 'dataset/vidvrd/'\n gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')\n result_dir = 'results/'\n res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')\n if not osp.exists(res_file):\n print('Generating ...')\n generate_track_link.main(res_file)\n grountruth = load_file(gt_file)\n prediction = load_file(res_file)\n evaluate(grountruth, prediction)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef eval_ground_scores(gt_relations, pred_relations, tiou_threshold):\n \"\"\"\n\n :param gt_relations:\n :param pred_relations:\n :param tiou_threshold:\n :return:\n \"\"\"\n relation_num = len(gt_relations)\n predict, predict_sub, predict_obj = 0, 0, 0\n for relation, pred_trajs in pred_relations.items():\n pred_sub = pred_trajs['sub']\n pred_obj = pred_trajs['obj']\n flag, flag_s, flag_o = False, False, False\n gt_trajs = gt_relations[relation]\n for gt_traj in gt_trajs:\n gt_sub = gt_traj['sub']\n gt_obj = gt_traj['obj']\n s_tiou = tiou(pred_sub, gt_sub)\n o_tiou = tiou(pred_obj, gt_obj)\n r_iou = min(s_tiou, o_tiou)\n if r_iou >= tiou_threshold:\n flag = True\n if s_tiou >= tiou_threshold:\n flag_s = True\n if o_tiou >= tiou_threshold:\n flag_o = True\n if flag:\n predict += 1\n if flag_s:\n predict_sub += 1\n if flag_o:\n predict_obj += 1\n predict = predict / relation_num\n predict_sub = predict_sub / relation_num\n predict_obj = predict_obj / relation_num\n return predict, predict_sub, predict_obj, relation_num\n\n\ndef evaluate(groundtruth, prediction, tiou_threshold=0.5):\n \"\"\" evaluate visual relation detection and visual \n relation tagging.\n \"\"\"\n video_num = len(groundtruth)\n print('Computing grounding accuracy over {} videos...'.format(video_num))\n acc, acc_sub, acc_obj = 0.0, 0.0, 0.0\n gt_rnum = 0\n for qid, relation_gt in groundtruth.items():\n if qid not in prediction:\n continue\n relation_pred = prediction[qid]\n if len(relation_pred) == 0:\n continue\n video_acc, 
video_acc_sub, video_acc_obj, relation_num = (\n eval_ground_scores(relation_gt, relation_pred, tiou_threshold))\n acc += video_acc\n acc_sub += video_acc_sub\n acc_obj += video_acc_obj\n gt_rnum += relation_num\n acc /= video_num\n acc_sub /= video_num\n acc_obj /= video_num\n print('Acc_S\\t Acc_O\\t Acc_R')\n print('{:.2f}\\t {:.2f}\\t {:.2f}'.format(acc_sub * 100, acc_obj * 100, \n acc * 100))\n\n\ndef main():\n groundtruth_dir = 'dataset/vidvrd/'\n gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')\n result_dir = 'results/'\n res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')\n if not osp.exists(res_file):\n print('Generating ...')\n generate_track_link.main(res_file)\n grountruth = load_file(gt_file)\n prediction = load_file(res_file)\n evaluate(grountruth, prediction)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import os.path as osp\nfrom evaluations.common import tiou\nfrom evaluations.util import load_file\nimport generate_track_link\n\n\ndef eval_ground_scores(gt_relations, pred_relations, tiou_threshold):\n \"\"\"\n\n :param gt_relations:\n :param pred_relations:\n :param tiou_threshold:\n :return:\n \"\"\"\n relation_num = len(gt_relations)\n predict, predict_sub, predict_obj = 0, 0, 0\n for relation, pred_trajs in pred_relations.items():\n pred_sub = pred_trajs['sub']\n pred_obj = pred_trajs['obj']\n flag, flag_s, flag_o = False, False, False\n gt_trajs = gt_relations[relation]\n for gt_traj in gt_trajs:\n gt_sub = gt_traj['sub']\n gt_obj = gt_traj['obj']\n s_tiou = tiou(pred_sub, gt_sub)\n o_tiou = tiou(pred_obj, gt_obj)\n r_iou = min(s_tiou, o_tiou)\n if r_iou >= tiou_threshold:\n flag = True\n if s_tiou >= tiou_threshold:\n flag_s = True\n if o_tiou >= tiou_threshold:\n flag_o = True\n if flag:\n predict += 1\n if flag_s:\n predict_sub += 1\n if flag_o:\n predict_obj += 1\n predict = predict / relation_num\n predict_sub = predict_sub / relation_num\n predict_obj = predict_obj / relation_num\n return predict, 
predict_sub, predict_obj, relation_num\n\n\ndef evaluate(groundtruth, prediction, tiou_threshold=0.5):\n \"\"\" evaluate visual relation detection and visual \n relation tagging.\n \"\"\"\n video_num = len(groundtruth)\n print('Computing grounding accuracy over {} videos...'.format(video_num))\n acc, acc_sub, acc_obj = 0.0, 0.0, 0.0\n gt_rnum = 0\n for qid, relation_gt in groundtruth.items():\n if qid not in prediction:\n continue\n relation_pred = prediction[qid]\n if len(relation_pred) == 0:\n continue\n video_acc, video_acc_sub, video_acc_obj, relation_num = (\n eval_ground_scores(relation_gt, relation_pred, tiou_threshold))\n acc += video_acc\n acc_sub += video_acc_sub\n acc_obj += video_acc_obj\n gt_rnum += relation_num\n acc /= video_num\n acc_sub /= video_num\n acc_obj /= video_num\n print('Acc_S\\t Acc_O\\t Acc_R')\n print('{:.2f}\\t {:.2f}\\t {:.2f}'.format(acc_sub * 100, acc_obj * 100, \n acc * 100))\n\n\ndef main():\n groundtruth_dir = 'dataset/vidvrd/'\n gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')\n result_dir = 'results/'\n res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')\n if not osp.exists(res_file):\n print('Generating ...')\n generate_track_link.main(res_file)\n grountruth = load_file(gt_file)\n prediction = load_file(res_file)\n evaluate(grountruth, prediction)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import os.path as osp\nfrom evaluations.common import tiou\nfrom evaluations.util import load_file\nimport generate_track_link\n\ndef eval_ground_scores(gt_relations, pred_relations, tiou_threshold):\n \"\"\"\n\n :param gt_relations:\n :param pred_relations:\n :param tiou_threshold:\n :return:\n \"\"\"\n # pred_relations = sorted(pred_relations, key=lambda x: x['score'], reverse=True)\n\n relation_num = len(gt_relations)\n predict, predict_sub, predict_obj = 0, 0, 0\n\n for relation, pred_trajs in pred_relations.items():\n pred_sub = pred_trajs['sub']\n pred_obj = pred_trajs['obj']\n flag, flag_s, 
flag_o = False, False, False\n\n gt_trajs = gt_relations[relation]\n\n # print(relation)\n\n for gt_traj in gt_trajs:\n gt_sub = gt_traj['sub']\n gt_obj = gt_traj['obj']\n s_tiou = tiou(pred_sub, gt_sub)\n o_tiou = tiou(pred_obj, gt_obj)\n r_iou = min(s_tiou, o_tiou)\n\n if r_iou >= tiou_threshold:\n flag = True\n if s_tiou >= tiou_threshold:\n flag_s = True\n if o_tiou >= tiou_threshold:\n flag_o = True\n if flag:\n predict += 1\n if flag_s:\n predict_sub += 1\n if flag_o:\n predict_obj += 1\n\n predict = predict / relation_num\n predict_sub = predict_sub /relation_num\n predict_obj = predict_obj /relation_num\n\n return predict, predict_sub, predict_obj, relation_num\n\n\ndef evaluate(groundtruth, prediction, tiou_threshold=0.5):\n \"\"\" evaluate visual relation detection and visual \n relation tagging.\n \"\"\"\n\n video_num = len(groundtruth)\n print('Computing grounding accuracy over {} videos...'.format(video_num))\n acc, acc_sub, acc_obj = 0.0, 0.0, 0.0\n\n gt_rnum = 0\n for qid, relation_gt in groundtruth.items():\n\n if qid not in prediction:\n continue\n relation_pred = prediction[qid]\n if len(relation_pred) == 0:\n continue\n\n video_acc, video_acc_sub, video_acc_obj, relation_num = eval_ground_scores(relation_gt, relation_pred, tiou_threshold)\n\n acc += video_acc\n acc_sub += video_acc_sub\n acc_obj += video_acc_obj\n gt_rnum += relation_num\n\n\n acc /= video_num\n acc_sub /= video_num\n acc_obj /= video_num\n\n print(\"Acc_S\\t Acc_O\\t Acc_R\")\n\n print('{:.2f}\\t {:.2f}\\t {:.2f}'.format(acc_sub*100, acc_obj*100, acc*100))\n\n\ndef main():\n\n groundtruth_dir = 'dataset/vidvrd/'\n gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')\n\n result_dir = 'results/'\n res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')\n if not osp.exists(res_file):\n print('Generating ...')\n generate_track_link.main(res_file)\n\n grountruth = load_file(gt_file)\n prediction = load_file(res_file)\n\n evaluate(grountruth, prediction)\n\n\nif 
__name__ == \"__main__\":\n main()\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# Generated by Django 2.2.4 on 2019-08-19 19:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('application', '0003_auto_20190818_1623'), ] operations = [ migrations.AlterField( model_name='user', name='visited', field=models.ManyToManyField(related_name='visitors', to='application.EscapeRoom'), ), ]
normal
{ "blob_id": "913e1f5a0af436ef081ab567c44b4149299d0ec6", "index": 3154, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('application', '0003_auto_20190818_1623')]\n operations = [migrations.AlterField(model_name='user', name='visited',\n field=models.ManyToManyField(related_name='visitors', to=\n 'application.EscapeRoom'))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('application', '0003_auto_20190818_1623')]\n operations = [migrations.AlterField(model_name='user', name='visited',\n field=models.ManyToManyField(related_name='visitors', to=\n 'application.EscapeRoom'))]\n", "step-5": "# Generated by Django 2.2.4 on 2019-08-19 19:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0003_auto_20190818_1623'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='visited',\n field=models.ManyToManyField(related_name='visitors', to='application.EscapeRoom'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os import sys from subprocess import check_output from charmhelpers.fetch import ( apt_install, apt_update, add_source, ) from charmhelpers.core.templating import render from charmhelpers.contrib.database.mysql import MySQLHelper def install_mysql(package='mysql-server', sources=None, keys=None): if not sources: sources = [] if not keys: keys = [] from subprocess import ( Popen, PIPE, ) for source in sources: add_source(source) if sources: apt_update() with open('/var/lib/mysql/mysql.passwd', 'r') as rpw: root_pass = rpw.read() dconf = Popen(['debconf-set-selections'], stdin=PIPE) dconf.stdin.write("%s %s/root_password password %s\n" % (package, package, root_pass)) dconf.stdin.write("%s %s/root_password_again password %s\n" % (package, package, root_pass)) dconf.communicate() dconf.wait() apt_install(package) def build_mycnf(cfg): i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave') # REFACTOR add to charm helpers unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1] if i_am_a_slave and cfg.get('tuning-level') != 'fast': # On slaves, this gets overwritten render( source='mysql/binlog.cnf', target='/etc/mysql/conf.d/binlog.cnf', context={ 'unit_id': unit_id, 'format': cfg.get('binlog-format', 'MIXED') }, ) render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg) def human_to_bytes(human): if human.isdigit(): return human factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776} modifier = human[-1] if modifier.lower() in factors: return int(human[:-1]) * factors[modifier.lower()] raise ValueError("Can only convert K, M, G, and T") def dataset_size(size, page): if not size.endswith('%'): return human_to_bytes(size) total_mem = human_to_bytes(get_memtotal()) sys_mem_limit = mem_limit() if is_32bits() and total_mem > sys_mem_limit: total_ram = sys_mem_limit factor = int(size[:-1]) * 0.01 pctram = sys_mem_limit * factor return int(pctram - (pctram % page)) def is_32bits(): try: IS_32BIT_SYSTEM = sys.maxsize < 2**32. 
except OverflowError: IS_32BIT_SYSTEM = True return IS_32BIT_SYSTEM def mem_limit(): import platform SYS_MEM_LIMIT = human_to_bytes(get_memtotal()) if platform.machine() in ['armv7l']: SYS_MEM_LIMIT = human_to_bytes('2700M') # experimentally determined elif is_32bits(): SYS_MEM_LIMIT = human_to_bytes('4G') return SYS_MEM_LIMIT def get_memtotal(): with open('/proc/meminfo') as meminfo_file: for line in meminfo_file: (key, mem) = line.split(':', 2) if key == 'MemTotal': (mtot, modifier) = mem.strip().split(' ') return '%s%s' % (mtot, modifier[0].upper()) def get_db_helper(): return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd', upasswdf_template='/var/lib/mysql/mysql-{}.passwd', delete_ondisk_passwd_file=False) # REFACTOR factory/cache def get_db_cursor(): import MySQLdb # Connect to mysql db_helper = get_db_helper() passwd = db_helper.get_mysql_root_password() connection = MySQLdb.connect(user="root", host="localhost", passwd=passwd) return connection.cursor() def create_database(name): # REFACTOR UTF-8 # Clean databasename cursor = get_db_cursor() cursor.execute("show databases like '%s'" % name) if cursor.fetchall(): return name cursor.execute("create database `%s` character set utf8" % name) cursor.close() return name def create_user(): # REFACTOR pwgen python module? maybe? yeah? (user, password) = check_output(['pwgen', '-N 2', '15']).split('\n')[:-1] cursor = get_db_cursor() grant_sql = "grant replication client on *.* to `%s` identified by '%s'" cursor.execute(grant_sql % (user, password)) cursor.close() return (user, password) def grant_database(database, user, password): cursor = get_db_cursor() cursor.execute( "grant all on `%s`.* to `%s` identified by '%s'" % (database, user, password)) cursor.close() # #relation_id = os.environ.get('JUJU_RELATION_ID') #change_unit = os.environ.get('JUJU_REMOTE_UNIT') # ## We'll name the database the same as the service. 
#database_name_file = '.%s_database_name' % (relation_id) ## change_unit will be None on broken hooks #database_name = '' #if change_unit: # database_name, _ = change_unit.split("/") # with open(database_name_file, 'w') as dbnf: # dbnf.write("%s\n" % database_name) # dbnf.flush() #elif os.path.exists(database_name_file): # with open(database_name_file, 'r') as dbname: # database_name = dbname.readline().strip() #else: # print 'No established database and no REMOTE_UNIT.' ## A user per service unit so we can deny access quickly #lastrun_path = '/var/lib/juju/%s.%s.lastrun' % (database_name, user) #slave_configured_path = '/var/lib/juju.slave.configured.for.%s' % database_name #slave_configured = os.path.exists(slave_configured_path) #slave = os.path.exists('/var/lib/juju/i.am.a.slave') #broken_path = '/var/lib/juju/%s.mysql.broken' % database_name #broken = os.path.exists(broken_path) # # # # #def migrate_to_mount(new_path): # """Invoked when new mountpoint appears. This function safely migrates # MySQL data from local disk to persistent storage (only if needed) # """ # old_path = '/var/lib/mysql' # if os.path.islink(old_path): # hookenv.log('{} is already a symlink, skipping migration'.format( # old_path)) # return True # # Ensure our new mountpoint is empty. Otherwise error and allow # # users to investigate and migrate manually # files = os.listdir(new_path) # try: # files.remove('lost+found') # except ValueError: # pass # if files: # raise RuntimeError('Persistent storage contains old data. 
' # 'Please investigate and migrate data manually ' # 'to: {}'.format(new_path)) # os.chmod(new_path, 0o700) # if os.path.isdir('/etc/apparmor.d/local'): # render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld', # context={'path': os.path.join(new_path, '')}) # host.service_reload('apparmor') # host.service_stop('mysql') # host.rsync(os.path.join(old_path, ''), # Ensure we have trailing slashes # os.path.join(new_path, ''), # options=['--archive']) # shutil.rmtree(old_path) # os.symlink(new_path, old_path) # host.service_start('mysql')
normal
{ "blob_id": "083a9555f8db586fbb065d59e4e333bb16ee3d2a", "index": 5521, "step-1": "<mask token>\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\n<mask token>\n\n\ndef 
get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\n<mask token>\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n", "step-2": "<mask token>\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if 
human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\n<mask token>\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute('create database `%s` character set utf8' % name)\n cursor.close()\n return name\n\n\n<mask token>\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n", "step-3": "<mask token>\n\n\ndef install_mysql(package='mysql-server', sources=None, 
keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\ndef mem_limit():\n import platform\n SYS_MEM_LIMIT = human_to_bytes(get_memtotal())\n if platform.machine() in ['armv7l']:\n SYS_MEM_LIMIT = human_to_bytes('2700M')\n elif is_32bits():\n 
SYS_MEM_LIMIT = human_to_bytes('4G')\n return SYS_MEM_LIMIT\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute('create database `%s` character set utf8' % name)\n cursor.close()\n return name\n\n\n<mask token>\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n", "step-4": "import os\nimport sys\nfrom subprocess import check_output\nfrom charmhelpers.fetch import apt_install, apt_update, add_source\nfrom charmhelpers.core.templating import render\nfrom charmhelpers.contrib.database.mysql import MySQLHelper\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, 
root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\ndef mem_limit():\n import platform\n SYS_MEM_LIMIT = human_to_bytes(get_memtotal())\n if platform.machine() in ['armv7l']:\n SYS_MEM_LIMIT = human_to_bytes('2700M')\n elif is_32bits():\n SYS_MEM_LIMIT = human_to_bytes('4G')\n return SYS_MEM_LIMIT\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef 
get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute('create database `%s` character set utf8' % name)\n cursor.close()\n return name\n\n\ndef create_user():\n user, password = check_output(['pwgen', '-N 2', '15']).split('\\n')[:-1]\n cursor = get_db_cursor()\n grant_sql = \"grant replication client on *.* to `%s` identified by '%s'\"\n cursor.execute(grant_sql % (user, password))\n cursor.close()\n return user, password\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n", "step-5": "import os\nimport sys\n\nfrom subprocess import check_output\n\nfrom charmhelpers.fetch import (\n apt_install,\n apt_update,\n add_source,\n)\n\nfrom charmhelpers.core.templating import render\nfrom charmhelpers.contrib.database.mysql import MySQLHelper\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n\n if not keys:\n keys = []\n\n from subprocess import (\n Popen,\n PIPE,\n )\n\n for source in sources:\n add_source(source)\n\n if sources:\n apt_update()\n\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write(\"%s %s/root_password password %s\\n\" % (package, package,\n root_pass))\n dconf.stdin.write(\"%s %s/root_password_again password %s\\n\" % (package,\n package,\n root_pass))\n dconf.communicate()\n dconf.wait()\n\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n # REFACTOR add to 
charm helpers\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n # On slaves, this gets overwritten\n render(\n source='mysql/binlog.cnf',\n target='/etc/mysql/conf.d/binlog.cnf',\n context={\n 'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')\n },\n )\n\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf',\n context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n\n raise ValueError(\"Can only convert K, M, G, and T\")\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - (pctram % page))\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2**32.\n except OverflowError:\n IS_32BIT_SYSTEM = True\n\n return IS_32BIT_SYSTEM\n\n\ndef mem_limit():\n import platform\n\n SYS_MEM_LIMIT = human_to_bytes(get_memtotal())\n\n if platform.machine() in ['armv7l']:\n SYS_MEM_LIMIT = human_to_bytes('2700M') # experimentally determined\n elif is_32bits():\n SYS_MEM_LIMIT = human_to_bytes('4G')\n\n return SYS_MEM_LIMIT\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n (key, mem) = line.split(':', 2)\n if key == 'MemTotal':\n (mtot, modifier) = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\n# REFACTOR factory/cache\ndef get_db_cursor():\n 
import MySQLdb\n # Connect to mysql\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user=\"root\", host=\"localhost\", passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n # REFACTOR UTF-8\n # Clean databasename\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute(\"create database `%s` character set utf8\" % name)\n cursor.close()\n return name\n\n\ndef create_user():\n # REFACTOR pwgen python module? maybe? yeah?\n (user, password) = check_output(['pwgen', '-N 2', '15']).split('\\n')[:-1]\n cursor = get_db_cursor()\n grant_sql = \"grant replication client on *.* to `%s` identified by '%s'\"\n cursor.execute(grant_sql % (user, password))\n cursor.close()\n return (user, password)\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\n \"grant all on `%s`.* to `%s` identified by '%s'\" % (database,\n user, password))\n cursor.close()\n\n\n\n#\n#relation_id = os.environ.get('JUJU_RELATION_ID')\n#change_unit = os.environ.get('JUJU_REMOTE_UNIT')\n#\n## We'll name the database the same as the service.\n#database_name_file = '.%s_database_name' % (relation_id)\n## change_unit will be None on broken hooks\n#database_name = ''\n#if change_unit:\n# database_name, _ = change_unit.split(\"/\")\n# with open(database_name_file, 'w') as dbnf:\n# dbnf.write(\"%s\\n\" % database_name)\n# dbnf.flush()\n#elif os.path.exists(database_name_file):\n# with open(database_name_file, 'r') as dbname:\n# database_name = dbname.readline().strip()\n#else:\n# print 'No established database and no REMOTE_UNIT.'\n## A user per service unit so we can deny access quickly\n#lastrun_path = '/var/lib/juju/%s.%s.lastrun' % (database_name, user)\n#slave_configured_path = '/var/lib/juju.slave.configured.for.%s' % database_name\n#slave_configured = os.path.exists(slave_configured_path)\n#slave = 
os.path.exists('/var/lib/juju/i.am.a.slave')\n#broken_path = '/var/lib/juju/%s.mysql.broken' % database_name\n#broken = os.path.exists(broken_path)\n#\n#\n#\n#\n#def migrate_to_mount(new_path):\n# \"\"\"Invoked when new mountpoint appears. This function safely migrates\n# MySQL data from local disk to persistent storage (only if needed)\n# \"\"\"\n# old_path = '/var/lib/mysql'\n# if os.path.islink(old_path):\n# hookenv.log('{} is already a symlink, skipping migration'.format(\n# old_path))\n# return True\n# # Ensure our new mountpoint is empty. Otherwise error and allow\n# # users to investigate and migrate manually\n# files = os.listdir(new_path)\n# try:\n# files.remove('lost+found')\n# except ValueError:\n# pass\n# if files:\n# raise RuntimeError('Persistent storage contains old data. '\n# 'Please investigate and migrate data manually '\n# 'to: {}'.format(new_path))\n# os.chmod(new_path, 0o700)\n# if os.path.isdir('/etc/apparmor.d/local'):\n# render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',\n# context={'path': os.path.join(new_path, '')})\n# host.service_reload('apparmor')\n# host.service_stop('mysql')\n# host.rsync(os.path.join(old_path, ''), # Ensure we have trailing slashes\n# os.path.join(new_path, ''),\n# options=['--archive'])\n# shutil.rmtree(old_path)\n# os.symlink(new_path, old_path)\n# host.service_start('mysql')\n", "step-ids": [ 9, 10, 11, 13, 14 ] }
[ 9, 10, 11, 13, 14 ]
# Basic script which send some request via rest api to the test-management-tool. # Be sure you setup host and api_token variable import http.client host = "localhost:8000" api_token = "fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3" # Connection conn = http.client.HTTPConnection(host) # Create a header of http request headers = { 'authorization': "Bearer " + api_token, 'content-type': "application/json", 'cache-control': "no-cache", 'postman-token': "44709a5c-ca4a-bbce-4b24-f0632a29bde4" } ################################################ payload = "{\n \"Name\": \"Create and edit project\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"Name\": \"Create and edit requirement\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Check if overview contains project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ ### payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = 
conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ payload = "{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without 
details\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ payload = "{\n \"Name\": \"Create test set and run\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 
4,\n \"Name\": \"Edit test set\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() ################################################ payload = "{\n \"Name\": \"Registration and log test\"\n}" conn.request("POST", "/api/v1/testsuites", payload, headers) ### res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login page\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers) res = conn.getresponse() data = res.read() payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}" conn.request("POST", "/api/v1/testcases", payload, headers)
normal
{ "blob_id": "0cc1aaa182fcf002ff2ae6cbcd6cbb84a08a3bc1", "index": 936, "step-1": "<mask token>\n", "step-2": "<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask 
token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<mask token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n", "step-3": "<mask token>\nhost = 'localhost:8000'\napi_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3'\nconn = http.client.HTTPConnection(host)\nheaders = {'authorization': 'Bearer ' + api_token, 'content-type':\n 'application/json', 'cache-control': 'no-cache', 'postman-token':\n '44709a5c-ca4a-bbce-4b24-f0632a29bde4'}\npayload = \"\"\"{\n \"Name\": \"Create and edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, 
headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Check if overview contains project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = 
res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create test set and run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = 
\"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Registration and log test\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login 
page\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\n", "step-4": "import http.client\nhost = 'localhost:8000'\napi_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3'\nconn = http.client.HTTPConnection(host)\nheaders = {'authorization': 'Bearer ' + api_token, 'content-type':\n 'application/json', 'cache-control': 'no-cache', 'postman-token':\n '44709a5c-ca4a-bbce-4b24-f0632a29bde4'}\npayload = \"\"\"{\n \"Name\": \"Create and edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Check 
if overview contains project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without 
name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create test set and run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}\"\"\"\nconn.request('POST', 
'/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Registration and log test\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login page\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = 
res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\n", "step-5": "# Basic script which send some request via rest api to the test-management-tool.\n# Be sure you setup host and api_token variable\n\nimport http.client\n\nhost = \"localhost:8000\"\napi_token = \"fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3\"\n\n# Connection\nconn = http.client.HTTPConnection(host)\n\n# Create a header of http request\nheaders = {\n 'authorization': \"Bearer \" + api_token,\n 'content-type': \"application/json\",\n 'cache-control': \"no-cache\",\n 'postman-token': \"44709a5c-ca4a-bbce-4b24-f0632a29bde4\"\n }\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Not selected project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Create project without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n 
\\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Check if overview contains project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Edit project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\n\n###\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create requirement without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Overview contains requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Edit requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Cover requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit TestSuites and TestCase\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n 
\\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test suite without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Check if overview contains suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Edit test suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case without details\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case with details\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Check if overview contains case\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Edit test case\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\npayload = \"{\\n \\\"Name\\\": 
\\\"Create test set and run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Overview contains set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set without tests\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Edit test set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create test run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Overview contains run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Execute contains tests\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = 
conn.getresponse()\ndata = res.read()\n\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Registration and log test\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Redirect to login page\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Registration\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Registrate same user\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Log and logout\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from pyecharts import options as opts from pyecharts.charts import * import pandas as pd import namemap from pyecharts.globals import ThemeType # import time import json import requests from datetime import datetime import pandas as pd import numpy as np def read_country_code(): """ 获取国家中英文字典 :return: """ country_dict = {} for key, val in namemap.nameMap.items(): # 将 nameMap 列表里面键值互换 country_dict[val] = key return country_dict def read_csv(): """ 读取数据,返回国家英文名称列表和累计确诊数列表 :return: """ country_dict = read_country_code() data = pd.read_csv("2019-nCoV.csv", index_col=False) countrys_names = list() confirmed_count = list() for x in range(len(data.index)): if data['name'].iloc[x] in country_dict.keys(): countrys_names.append(country_dict[data['name'].iloc[x]]) confirmed_count.append(data['confirm'].iloc[x]) else: print(data['name'].iloc[x]) return countrys_names, confirmed_count def catch_data(): url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5' reponse = requests.get(url=url).json() data = json.loads(reponse['data']) return data # 定义数据处理函数 def confirm(x): confirm = eval(str(x))['confirm'] return confirm def suspect(x): suspect = eval(str(x))['suspect'] return suspect def dead(x): dead = eval(str(x))['dead'] return dead def heal(x): heal = eval(str(x))['heal'] return heal def draw_map(): """ china! 
""" data = catch_data() dict_keys = data.keys() # China lastUpdateTime = data['lastUpdateTime'] chinaTotal = data['chinaTotal'] chinaAdd = data['chinaAdd'] #结果{'confirm': 84970, 'heal': 79963, 'dead': 4645, 'nowConfirm': 362, 'suspect': 11, #'nowSevere': 13, 'importedCase': 1868, 'noInfect': 108} areaTree = data['areaTree'] china_data = areaTree[0]['children'] china_list = [] for a in range(len(china_data)): province = china_data[a]['name'] province_list = china_data[a]['children'] for b in range(len(province_list)): city = province_list[b]['name'] total = province_list[b]['total'] today = province_list[b]['today'] china_dict = {} china_dict['province'] = province china_dict['city'] = city china_dict['total'] = total china_dict['today'] = today china_list.append(china_dict) china_data = pd.DataFrame(china_list) china_data.head() # 函数映射 china_data['confirm'] = china_data['total'].map(confirm) china_data['suspect'] = china_data['total'].map(suspect) china_data['dead'] = china_data['total'].map(dead) china_data['heal'] = china_data['total'].map(heal) china_data['addconfirm'] = china_data['today'].map(confirm) #['addsuspect'] = china_data['today'].map(suspect) #china_data['adddead'] = china_data['today'].map(dead) #china_data['addheal'] = china_data['today'].map(heal) china_data = china_data[["province","city","confirm","suspect","dead","heal","addconfirm"]] china_data.head() total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) #设置主题,和画布大小 total_pie.add("",[list(z) for z in zip(chinaTotal.keys(), chinaTotal.values())], center=["50%", "70%"], #图的位置 radius=[50, 80]) #内外径大小 total_pie.set_global_opts( title_opts=opts.TitleOpts(title="全国总量",subtitle=("截止"+lastUpdateTime))) total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter="{c}")) #标签格式 total_pie.render_notebook() totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) #设置主题,和画布大小 totaladd_pie.add("",[list(z) for z in 
zip(chinaAdd.keys(), chinaAdd.values())], center=["50%", "50%"], radius=[50, 80]) totaladd_pie.set_global_opts( title_opts=opts.TitleOpts(title="昨日新增")) totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter="{c}")) #标签格式 totaladd_pie.render_notebook() area_data = china_data.groupby("province")["confirm"].sum().reset_index() area_data.columns = ["province","confirm"] area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS)) area_map.add("",[list(z) for z in zip(list(area_data["province"]), list(area_data["confirm"]))], "china",is_map_symbol_show=False) area_map.set_global_opts(title_opts=opts.TitleOpts(title="2019_nCoV中国疫情地图"),visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces = [ {"min": 1001 , "label": '>1000',"color": "#893448"}, #不指定 max,表示 max 为无限大 {"min": 500, "max": 1000, "label": '500-1000',"color": "#ff585e"}, {"min": 101, "max": 499, "label": '101-499',"color": "#fb8146"}, {"min": 10, "max": 100, "label": '10-100',"color": "#ffb248"}, {"min": 0, "max": 9, "label": '0-9',"color" : "#fff2d1" }])) area_map.render_notebook() page = Page() page.add(total_pie) page.add(totaladd_pie) page.add(area_map) """ 绘制世界地图 遇到一个很神奇的问题: 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据 :return: """ # 修复注释中的问题,原因是 confirmed_count 中的 int 是 numpy 的 int ,需转化为 python 中的 int # 感谢公众号的 @李康伟 同学提出 countrys_names, confirmed_count = read_csv() confirmed_count_list = [] for item in confirmed_count: confirmed_count_list.append(int(item)) # countrys_names = ['United States', 'Brazil', 'Russia', 'Spain', 'United Kingdom', 'Italy', 'France', 'Germany', 'Turkey', 'Iran', 'India', 'Peru', 'Canada', 'Saudi Arabia', 'Mexico', 'Chile', 'Belgium', 'Pakistan', 'Netherlands', 'Qatar', 'Ecuador', 'Belarus', 'Sweden', 'Bangladesh', 'Singapore Rep.', 'Switzerland', 'Portugal', 'United Arab Emirates', 'Ireland', 'Indonesia', 'South Africa', 'Poland', 'Ukraine', 'Kuwait', 'Colombia', 'Romania', 'Israel', 'Japan', 'Egypt', 'Austria', 'Dominican Rep.', 'Philippines', 'Denmark', 
'Argentina', 'Korea', 'Serbia', 'Panama', 'Afghanistan', 'Czech Rep.', 'Norway', 'Kazakhstan', 'Algeria', 'Nigeria', 'Morocco', 'Oman', 'Malaysia', 'Australia', 'Moldova', 'Ghana', 'Finland', 'Armenia', 'Bolivia', 'Cameroon', 'Iraq', 'Luxembourg', 'Azerbaijan', 'Honduras', 'Hungary', 'Sudan', 'Guinea', 'Uzbekistan', 'Guatemala', 'Thailand', 'Senegal', 'Greece', 'Tajikistan', 'Bulgaria', "Côte d'Ivoire", 'Djibouti', 'Croatia', 'Gabon', 'Cuba', 'Estonia', 'El Salvador', 'Iceland', 'Lithuania', 'Somalia', 'New Zealand', 'Slovakia', 'Slovenia', 'Kyrgyzstan', 'Kenya', 'Guinea Bissau', 'Lebanon', 'Sri Lanka', 'Tunisia', 'Latvia', 'Mali', 'Venezuela', 'Albania', 'Eq. Guinea', 'Niger', 'Cyprus', 'Zambia', 'Costa Rica', 'Haiti', 'Paraguay', 'Burkina Faso', 'Uruguay', 'Georgia', 'Jordan', 'Chad', 'Sierra Leone', 'Nepal', 'Jamaica', 'Tanzania', 'Ethiopia', 'Madagascar', 'Palestine', 'Togo', 'Vietnam', 'Rwanda', 'Montenegro', 'Nicaragua', 'Liberia', 'Swaziland', 'Mauritania', 'Yemen', 'Myanmar', 'Uganda', 'Mozambique', 'Mongolia', 'Brunei', 'Benin', 'Guyana', 'Cambodia', 'The Bahamas', 'Malawi', 'Libya', 'Syria', 'Angola', 'Zimbabwe', 'Burundi', 'Eritrea', 'Botswana', 'Gambia', 'Bhutan', 'East Timor', 'Namibia', 'Lao PDR', 'Fiji', 'Belize', 'Suriname', 'Papua New Guinea', 'Lesotho'] # # confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 
865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2] c = ( Map() .add( "确诊人数", [list(z) for z in zip(countrys_names, confirmed_count_list)], is_map_symbol_show=False, maptype="world", label_opts=opts.LabelOpts(is_show=False), itemstyle_opts=opts.ItemStyleOpts(color="rgb(49,60,72)") ) .set_series_opts(label_opts=opts.LabelOpts(is_show=False)) .set_global_opts( title_opts=opts.TitleOpts(title="全球 2019-nCoV 地图"), visualmap_opts=opts.VisualMapOpts(max_=1700000), ) #.render("map_world.html") ) page.add(c) page.render('covid-19 中国和世界数据.html')
normal
{ "blob_id": "fe3584dd858c06d66215b4a182adf87d35324975", "index": 4486, "step-1": "<mask token>\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\n<mask token>\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = 
requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\ndef suspect(x):\n suspect = eval(str(x))['suspect']\n return suspect\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\ndef suspect(x):\n suspect = eval(str(x))['suspect']\n return suspect\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\ndef draw_map():\n \"\"\"\n china!\n \"\"\"\n data = catch_data()\n dict_keys = data.keys()\n lastUpdateTime = data['lastUpdateTime']\n chinaTotal = data['chinaTotal']\n chinaAdd = data['chinaAdd']\n areaTree = data['areaTree']\n china_data = areaTree[0]['children']\n china_list = []\n for a in range(len(china_data)):\n province = china_data[a]['name']\n province_list = china_data[a]['children']\n for b in range(len(province_list)):\n city = 
province_list[b]['name']\n total = province_list[b]['total']\n today = province_list[b]['today']\n china_dict = {}\n china_dict['province'] = province\n china_dict['city'] = city\n china_dict['total'] = total\n china_dict['today'] = today\n china_list.append(china_dict)\n china_data = pd.DataFrame(china_list)\n china_data.head()\n china_data['confirm'] = china_data['total'].map(confirm)\n china_data['suspect'] = china_data['total'].map(suspect)\n china_data['dead'] = china_data['total'].map(dead)\n china_data['heal'] = china_data['total'].map(heal)\n china_data['addconfirm'] = china_data['today'].map(confirm)\n china_data = china_data[['province', 'city', 'confirm', 'suspect',\n 'dead', 'heal', 'addconfirm']]\n china_data.head()\n total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS, width\n ='900px', height='350px'))\n total_pie.add('', [list(z) for z in zip(chinaTotal.keys(), chinaTotal.\n values())], center=['50%', '70%'], radius=[50, 80])\n total_pie.set_global_opts(title_opts=opts.TitleOpts(title='全国总量',\n subtitle='截止' + lastUpdateTime))\n total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n total_pie.render_notebook()\n totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,\n width='900px', height='350px'))\n totaladd_pie.add('', [list(z) for z in zip(chinaAdd.keys(), chinaAdd.\n values())], center=['50%', '50%'], radius=[50, 80])\n totaladd_pie.set_global_opts(title_opts=opts.TitleOpts(title='昨日新增'))\n totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n totaladd_pie.render_notebook()\n area_data = china_data.groupby('province')['confirm'].sum().reset_index()\n area_data.columns = ['province', 'confirm']\n area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))\n area_map.add('', [list(z) for z in zip(list(area_data['province']),\n list(area_data['confirm']))], 'china', is_map_symbol_show=False)\n area_map.set_global_opts(title_opts=opts.TitleOpts(title=\n '2019_nCoV中国疫情地图'), 
visualmap_opts=opts.VisualMapOpts(is_piecewise=\n True, pieces=[{'min': 1001, 'label': '>1000', 'color': '#893448'},\n {'min': 500, 'max': 1000, 'label': '500-1000', 'color': '#ff585e'},\n {'min': 101, 'max': 499, 'label': '101-499', 'color': '#fb8146'}, {\n 'min': 10, 'max': 100, 'label': '10-100', 'color': '#ffb248'}, {\n 'min': 0, 'max': 9, 'label': '0-9', 'color': '#fff2d1'}]))\n area_map.render_notebook()\n page = Page()\n page.add(total_pie)\n page.add(totaladd_pie)\n page.add(area_map)\n \"\"\"\n 绘制世界地图\n 遇到一个很神奇的问题:\n 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据\n :return:\n \"\"\"\n countrys_names, confirmed_count = read_csv()\n confirmed_count_list = []\n for item in confirmed_count:\n confirmed_count_list.append(int(item))\n c = Map().add('确诊人数', [list(z) for z in zip(countrys_names,\n confirmed_count_list)], is_map_symbol_show=False, maptype='world',\n label_opts=opts.LabelOpts(is_show=False), itemstyle_opts=opts.\n ItemStyleOpts(color='rgb(49,60,72)')).set_series_opts(label_opts=\n opts.LabelOpts(is_show=False)).set_global_opts(title_opts=opts.\n TitleOpts(title='全球 2019-nCoV 地图'), visualmap_opts=opts.\n VisualMapOpts(max_=1700000))\n page.add(c)\n page.render('covid-19 中国和世界数据.html')\n", "step-4": "from pyecharts import options as opts\nfrom pyecharts.charts import *\nimport pandas as pd\nimport namemap\nfrom pyecharts.globals import ThemeType\nimport time\nimport json\nimport requests\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\n\n\ndef read_country_code():\n \"\"\"\n 获取国家中英文字典\n :return:\n \"\"\"\n country_dict = {}\n for key, val in namemap.nameMap.items():\n country_dict[val] = key\n return country_dict\n\n\ndef read_csv():\n \"\"\"\n 读取数据,返回国家英文名称列表和累计确诊数列表\n :return:\n \"\"\"\n country_dict = read_country_code()\n data = pd.read_csv('2019-nCoV.csv', index_col=False)\n countrys_names = list()\n confirmed_count = list()\n for x in range(len(data.index)):\n if data['name'].iloc[x] in country_dict.keys():\n 
countrys_names.append(country_dict[data['name'].iloc[x]])\n confirmed_count.append(data['confirm'].iloc[x])\n else:\n print(data['name'].iloc[x])\n return countrys_names, confirmed_count\n\n\ndef catch_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\n reponse = requests.get(url=url).json()\n data = json.loads(reponse['data'])\n return data\n\n\ndef confirm(x):\n confirm = eval(str(x))['confirm']\n return confirm\n\n\ndef suspect(x):\n suspect = eval(str(x))['suspect']\n return suspect\n\n\ndef dead(x):\n dead = eval(str(x))['dead']\n return dead\n\n\ndef heal(x):\n heal = eval(str(x))['heal']\n return heal\n\n\ndef draw_map():\n \"\"\"\n china!\n \"\"\"\n data = catch_data()\n dict_keys = data.keys()\n lastUpdateTime = data['lastUpdateTime']\n chinaTotal = data['chinaTotal']\n chinaAdd = data['chinaAdd']\n areaTree = data['areaTree']\n china_data = areaTree[0]['children']\n china_list = []\n for a in range(len(china_data)):\n province = china_data[a]['name']\n province_list = china_data[a]['children']\n for b in range(len(province_list)):\n city = province_list[b]['name']\n total = province_list[b]['total']\n today = province_list[b]['today']\n china_dict = {}\n china_dict['province'] = province\n china_dict['city'] = city\n china_dict['total'] = total\n china_dict['today'] = today\n china_list.append(china_dict)\n china_data = pd.DataFrame(china_list)\n china_data.head()\n china_data['confirm'] = china_data['total'].map(confirm)\n china_data['suspect'] = china_data['total'].map(suspect)\n china_data['dead'] = china_data['total'].map(dead)\n china_data['heal'] = china_data['total'].map(heal)\n china_data['addconfirm'] = china_data['today'].map(confirm)\n china_data = china_data[['province', 'city', 'confirm', 'suspect',\n 'dead', 'heal', 'addconfirm']]\n china_data.head()\n total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS, width\n ='900px', height='350px'))\n total_pie.add('', [list(z) for z in zip(chinaTotal.keys(), 
chinaTotal.\n values())], center=['50%', '70%'], radius=[50, 80])\n total_pie.set_global_opts(title_opts=opts.TitleOpts(title='全国总量',\n subtitle='截止' + lastUpdateTime))\n total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n total_pie.render_notebook()\n totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,\n width='900px', height='350px'))\n totaladd_pie.add('', [list(z) for z in zip(chinaAdd.keys(), chinaAdd.\n values())], center=['50%', '50%'], radius=[50, 80])\n totaladd_pie.set_global_opts(title_opts=opts.TitleOpts(title='昨日新增'))\n totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter='{c}'))\n totaladd_pie.render_notebook()\n area_data = china_data.groupby('province')['confirm'].sum().reset_index()\n area_data.columns = ['province', 'confirm']\n area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))\n area_map.add('', [list(z) for z in zip(list(area_data['province']),\n list(area_data['confirm']))], 'china', is_map_symbol_show=False)\n area_map.set_global_opts(title_opts=opts.TitleOpts(title=\n '2019_nCoV中国疫情地图'), visualmap_opts=opts.VisualMapOpts(is_piecewise=\n True, pieces=[{'min': 1001, 'label': '>1000', 'color': '#893448'},\n {'min': 500, 'max': 1000, 'label': '500-1000', 'color': '#ff585e'},\n {'min': 101, 'max': 499, 'label': '101-499', 'color': '#fb8146'}, {\n 'min': 10, 'max': 100, 'label': '10-100', 'color': '#ffb248'}, {\n 'min': 0, 'max': 9, 'label': '0-9', 'color': '#fff2d1'}]))\n area_map.render_notebook()\n page = Page()\n page.add(total_pie)\n page.add(totaladd_pie)\n page.add(area_map)\n \"\"\"\n 绘制世界地图\n 遇到一个很神奇的问题:\n 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据\n :return:\n \"\"\"\n countrys_names, confirmed_count = read_csv()\n confirmed_count_list = []\n for item in confirmed_count:\n confirmed_count_list.append(int(item))\n c = Map().add('确诊人数', [list(z) for z in zip(countrys_names,\n confirmed_count_list)], is_map_symbol_show=False, maptype='world',\n 
label_opts=opts.LabelOpts(is_show=False), itemstyle_opts=opts.\n ItemStyleOpts(color='rgb(49,60,72)')).set_series_opts(label_opts=\n opts.LabelOpts(is_show=False)).set_global_opts(title_opts=opts.\n TitleOpts(title='全球 2019-nCoV 地图'), visualmap_opts=opts.\n VisualMapOpts(max_=1700000))\n page.add(c)\n page.render('covid-19 中国和世界数据.html')\n", "step-5": "from pyecharts import options as opts\r\nfrom pyecharts.charts import *\r\nimport pandas as pd\r\nimport namemap\r\nfrom pyecharts.globals import ThemeType\r\n\r\n\r\n#\r\nimport time \r\nimport json\r\nimport requests\r\nfrom datetime import datetime\r\nimport pandas as pd \r\nimport numpy as np\r\n \r\ndef read_country_code():\r\n \"\"\"\r\n 获取国家中英文字典\r\n :return:\r\n \"\"\"\r\n country_dict = {}\r\n for key, val in namemap.nameMap.items(): # 将 nameMap 列表里面键值互换\r\n country_dict[val] = key\r\n return country_dict\r\n\r\ndef read_csv():\r\n \"\"\"\r\n 读取数据,返回国家英文名称列表和累计确诊数列表\r\n :return:\r\n \"\"\"\r\n country_dict = read_country_code()\r\n data = pd.read_csv(\"2019-nCoV.csv\", index_col=False)\r\n\r\n countrys_names = list()\r\n confirmed_count = list()\r\n\r\n for x in range(len(data.index)):\r\n if data['name'].iloc[x] in country_dict.keys():\r\n countrys_names.append(country_dict[data['name'].iloc[x]])\r\n confirmed_count.append(data['confirm'].iloc[x])\r\n else:\r\n print(data['name'].iloc[x])\r\n\r\n return countrys_names, confirmed_count\r\n\r\n\r\ndef catch_data():\r\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\r\n reponse = requests.get(url=url).json()\r\n data = json.loads(reponse['data'])\r\n return data\r\n\r\n\r\n\r\n\r\n# 定义数据处理函数\r\ndef confirm(x):\r\n confirm = eval(str(x))['confirm']\r\n return confirm\r\ndef suspect(x):\r\n suspect = eval(str(x))['suspect']\r\n return suspect\r\ndef dead(x):\r\n dead = eval(str(x))['dead']\r\n return dead\r\ndef heal(x):\r\n heal = eval(str(x))['heal']\r\n return heal\r\n\r\ndef draw_map():\r\n \"\"\"\r\n china!\r\n \"\"\"\r\n data = 
catch_data()\r\n dict_keys = data.keys()\r\n # China\r\n lastUpdateTime = data['lastUpdateTime']\r\n chinaTotal = data['chinaTotal']\r\n chinaAdd = data['chinaAdd']\r\n #结果{'confirm': 84970, 'heal': 79963, 'dead': 4645, 'nowConfirm': 362, 'suspect': 11, \r\n #'nowSevere': 13, 'importedCase': 1868, 'noInfect': 108}\r\n areaTree = data['areaTree']\r\n china_data = areaTree[0]['children']\r\n china_list = []\r\n for a in range(len(china_data)):\r\n province = china_data[a]['name']\r\n province_list = china_data[a]['children']\r\n for b in range(len(province_list)):\r\n city = province_list[b]['name']\r\n total = province_list[b]['total']\r\n today = province_list[b]['today']\r\n china_dict = {}\r\n china_dict['province'] = province\r\n china_dict['city'] = city\r\n china_dict['total'] = total\r\n china_dict['today'] = today\r\n china_list.append(china_dict)\r\n china_data = pd.DataFrame(china_list)\r\n china_data.head()\r\n \r\n # 函数映射\r\n china_data['confirm'] = china_data['total'].map(confirm)\r\n china_data['suspect'] = china_data['total'].map(suspect)\r\n china_data['dead'] = china_data['total'].map(dead)\r\n china_data['heal'] = china_data['total'].map(heal)\r\n china_data['addconfirm'] = china_data['today'].map(confirm)\r\n #['addsuspect'] = china_data['today'].map(suspect)\r\n #china_data['adddead'] = china_data['today'].map(dead)\r\n #china_data['addheal'] = china_data['today'].map(heal)\r\n china_data = china_data[[\"province\",\"city\",\"confirm\",\"suspect\",\"dead\",\"heal\",\"addconfirm\"]]\r\n china_data.head()\r\n\r\n\r\n\r\n total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) #设置主题,和画布大小\r\n total_pie.add(\"\",[list(z) for z in zip(chinaTotal.keys(), chinaTotal.values())],\r\n center=[\"50%\", \"70%\"], #图的位置\r\n radius=[50, 80]) #内外径大小\r\n total_pie.set_global_opts(\r\n title_opts=opts.TitleOpts(title=\"全国总量\",subtitle=(\"截止\"+lastUpdateTime)))\r\n 
total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter=\"{c}\")) #标签格式\r\n total_pie.render_notebook()\r\n\r\n totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) #设置主题,和画布大小\r\n totaladd_pie.add(\"\",[list(z) for z in zip(chinaAdd.keys(), chinaAdd.values())],\r\n center=[\"50%\", \"50%\"],\r\n radius=[50, 80])\r\n totaladd_pie.set_global_opts(\r\n title_opts=opts.TitleOpts(title=\"昨日新增\"))\r\n totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter=\"{c}\")) #标签格式\r\n totaladd_pie.render_notebook()\r\n\r\n area_data = china_data.groupby(\"province\")[\"confirm\"].sum().reset_index()\r\n area_data.columns = [\"province\",\"confirm\"]\r\n area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))\r\n area_map.add(\"\",[list(z) for z in zip(list(area_data[\"province\"]), list(area_data[\"confirm\"]))], \"china\",is_map_symbol_show=False)\r\n area_map.set_global_opts(title_opts=opts.TitleOpts(title=\"2019_nCoV中国疫情地图\"),visualmap_opts=opts.VisualMapOpts(is_piecewise=True,\r\n pieces = [\r\n {\"min\": 1001 , \"label\": '>1000',\"color\": \"#893448\"}, #不指定 max,表示 max 为无限大\r\n {\"min\": 500, \"max\": 1000, \"label\": '500-1000',\"color\": \"#ff585e\"},\r\n {\"min\": 101, \"max\": 499, \"label\": '101-499',\"color\": \"#fb8146\"},\r\n {\"min\": 10, \"max\": 100, \"label\": '10-100',\"color\": \"#ffb248\"},\r\n {\"min\": 0, \"max\": 9, \"label\": '0-9',\"color\" : \"#fff2d1\" }]))\r\n area_map.render_notebook()\r\n\r\n\r\n page = Page()\r\n page.add(total_pie)\r\n page.add(totaladd_pie)\r\n page.add(area_map)\r\n\r\n\r\n \"\"\"\r\n 绘制世界地图\r\n 遇到一个很神奇的问题:\r\n 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据\r\n :return:\r\n \"\"\"\r\n\r\n # 修复注释中的问题,原因是 confirmed_count 中的 int 是 numpy 的 int ,需转化为 python 中的 int\r\n # 感谢公众号的 @李康伟 同学提出\r\n countrys_names, confirmed_count = read_csv()\r\n confirmed_count_list = []\r\n for item in confirmed_count:\r\n confirmed_count_list.append(int(item))\r\n\r\n # countrys_names = 
['United States', 'Brazil', 'Russia', 'Spain', 'United Kingdom', 'Italy', 'France', 'Germany', 'Turkey', 'Iran', 'India', 'Peru', 'Canada', 'Saudi Arabia', 'Mexico', 'Chile', 'Belgium', 'Pakistan', 'Netherlands', 'Qatar', 'Ecuador', 'Belarus', 'Sweden', 'Bangladesh', 'Singapore Rep.', 'Switzerland', 'Portugal', 'United Arab Emirates', 'Ireland', 'Indonesia', 'South Africa', 'Poland', 'Ukraine', 'Kuwait', 'Colombia', 'Romania', 'Israel', 'Japan', 'Egypt', 'Austria', 'Dominican Rep.', 'Philippines', 'Denmark', 'Argentina', 'Korea', 'Serbia', 'Panama', 'Afghanistan', 'Czech Rep.', 'Norway', 'Kazakhstan', 'Algeria', 'Nigeria', 'Morocco', 'Oman', 'Malaysia', 'Australia', 'Moldova', 'Ghana', 'Finland', 'Armenia', 'Bolivia', 'Cameroon', 'Iraq', 'Luxembourg', 'Azerbaijan', 'Honduras', 'Hungary', 'Sudan', 'Guinea', 'Uzbekistan', 'Guatemala', 'Thailand', 'Senegal', 'Greece', 'Tajikistan', 'Bulgaria', \"Côte d'Ivoire\", 'Djibouti', 'Croatia', 'Gabon', 'Cuba', 'Estonia', 'El Salvador', 'Iceland', 'Lithuania', 'Somalia', 'New Zealand', 'Slovakia', 'Slovenia', 'Kyrgyzstan', 'Kenya', 'Guinea Bissau', 'Lebanon', 'Sri Lanka', 'Tunisia', 'Latvia', 'Mali', 'Venezuela', 'Albania', 'Eq. 
Guinea', 'Niger', 'Cyprus', 'Zambia', 'Costa Rica', 'Haiti', 'Paraguay', 'Burkina Faso', 'Uruguay', 'Georgia', 'Jordan', 'Chad', 'Sierra Leone', 'Nepal', 'Jamaica', 'Tanzania', 'Ethiopia', 'Madagascar', 'Palestine', 'Togo', 'Vietnam', 'Rwanda', 'Montenegro', 'Nicaragua', 'Liberia', 'Swaziland', 'Mauritania', 'Yemen', 'Myanmar', 'Uganda', 'Mozambique', 'Mongolia', 'Brunei', 'Benin', 'Guyana', 'Cambodia', 'The Bahamas', 'Malawi', 'Libya', 'Syria', 'Angola', 'Zimbabwe', 'Burundi', 'Eritrea', 'Botswana', 'Gambia', 'Bhutan', 'East Timor', 'Namibia', 'Lao PDR', 'Fiji', 'Belize', 'Suriname', 'Papua New Guinea', 'Lesotho']\r\n # \r\n # confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2]\r\n\r\n\r\n c = (\r\n Map()\r\n .add(\r\n \"确诊人数\",\r\n [list(z) for z in zip(countrys_names, confirmed_count_list)],\r\n is_map_symbol_show=False,\r\n maptype=\"world\",\r\n label_opts=opts.LabelOpts(is_show=False),\r\n itemstyle_opts=opts.ItemStyleOpts(color=\"rgb(49,60,72)\")\r\n )\r\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=\"全球 2019-nCoV 
地图\"),\r\n visualmap_opts=opts.VisualMapOpts(max_=1700000),\r\n )\r\n #.render(\"map_world.html\")\r\n )\r\n page.add(c)\r\n page.render('covid-19 中国和世界数据.html')\r\n\r\n\r\n", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
rule run_all: shell: ''' echo 'Hello World!' '''
normal
{ "blob_id": "c967a63d03f9f836d97ae917dba2a7bfb7a54a0e", "index": 9673, "step-1": "rule run_all:\n shell:\n '''\n echo 'Hello World!'\n '''\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import re import os import pandas as pd instruments_file = os.path.abspath("instruments.csv") input_names_file = os.path.abspath("names.txt") output_names_file = os.path.abspath("names.csv") inst_name_file = os.path.abspath("name_instrument.csv") reg_ex = '; |, |\\*|\n' name_header = ["first_name", "last_name"] def process_names(): """ Opening, reading name file and building name array. """ with open(input_names_file, 'r') as data: plaintext = data.read() name_array = plaintext.split('\n') # Final name list final_name_list = [] # Parsing different name formats and standardizing to create csv for name in name_array: if len(name.split(',')) == 2: temp_name_list = re.split(reg_ex, name) last_name = temp_name_list.pop() first_name = temp_name_list.pop() final_name_list.append(last_name + ',' + first_name) elif len(name.split(' ')) == 2: final_name_list.append(name.replace(' ', ',')) elif len(name.split(' ')) == 3: temp_name_list = re.split(' ', name) last_name = temp_name_list.pop() middle_name = temp_name_list.pop() first_name = temp_name_list.pop() final_name_list.append(first_name + ',' + middle_name + ' ' + last_name) else: final_name_list.append(name) # Writing final name list to a file with open(output_names_file, "w") as txt_file: txt_file.write("first_name,last_name" + "\n") for name in final_name_list: txt_file.write(name + "\n") # works with any number of elements in a line names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')
normal
{ "blob_id": "8c539dbbb762717393b9a71ddca8eb3872890854", "index": 288, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n", "step-3": "<mask token>\ninstruments_file = os.path.abspath('instruments.csv')\ninput_names_file = os.path.abspath('names.txt')\noutput_names_file = os.path.abspath('names.csv')\ninst_name_file = os.path.abspath('name_instrument.csv')\nreg_ex = '; |, |\\\\*|\\n'\nname_header = ['first_name', 'last_name']\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' 
')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n", "step-4": "import re\nimport os\nimport pandas as pd\ninstruments_file = os.path.abspath('instruments.csv')\ninput_names_file = os.path.abspath('names.txt')\noutput_names_file = os.path.abspath('names.csv')\ninst_name_file = os.path.abspath('name_instrument.csv')\nreg_ex = '; |, |\\\\*|\\n'\nname_header = ['first_name', 'last_name']\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, 
names=name_header, sep=',',\n engine='python')\n", "step-5": "import re\nimport os\nimport pandas as pd\n\ninstruments_file = os.path.abspath(\"instruments.csv\")\ninput_names_file = os.path.abspath(\"names.txt\")\noutput_names_file = os.path.abspath(\"names.csv\")\ninst_name_file = os.path.abspath(\"name_instrument.csv\")\nreg_ex = '; |, |\\\\*|\\n'\nname_header = [\"first_name\", \"last_name\"]\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import json import sys import time # boardName pageNum indexNewest # Baseball 5000 5183 # Elephants 3500 3558 # Monkeys 3500 3672 # Lions 3300 3381 # Guardians 3500 3542 boardNameList = ["Baseball", "Elephants", "Monkeys", "Lions", "Guardians"] def loadData(filename): _data = json.loads(open(filename).read()) return _data def buildUserDict(userDict, _data, boardName): #各版發文數 發文總推數 發文總噓數 發文總->數 各版推文數 各板噓文數 各版->數 #article article_g article_b article_n g b n # userDict = dict() for article in _data: _user = article['b_作者'].split(" ")[0] if not _user in userDict: userDict[_user] = dict() if not boardName in userDict[_user]: userDict[_user][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0} userDict[_user][boardName]['article'] += 1 userDict[_user][boardName]['article_g'] += article['h_推文總數']['g'] userDict[_user][boardName]['article_b'] += article['h_推文總數']['b'] userDict[_user][boardName]['article_n'] += article['h_推文總數']['n'] responses = article['g_推文'] for res in responses: resUser = responses[res]['留言者'] if not resUser in userDict: userDict[resUser] = dict() if not boardName in userDict[resUser]: userDict[resUser][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0} if responses[res]['狀態'] == u'噓 ': userDict[resUser][boardName]['b'] += 1 elif responses[res]['狀態'] == u'推 ': userDict[resUser][boardName]['g'] += 1 else: userDict[resUser][boardName]['n'] += 1 return userDict def printFeature2File(userDict, filename): _file = open(filename, "w") json.dump(userDict,_file) _file.close() if __name__ == "__main__": # filename = str(sys.argv[1]) featureFileOut = str(sys.argv[1]) dataDir = "../data/" filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json','data-Elephants-3500-2017-06-29-03-30-22.json', 'data-Monkeys-3500-2017-06-29-03-31-55.json','data-Guardians-3500-2017-06-29-04-12-43.json', 'data-Lions-3300-2017-06-29-04-11-50.json'] #python3 extractFeatures.py ../data/userFeatureTest.json 
total_start = time.time() _start = time.time() userDict = dict() for index in range(len(filenameList)): print("Loading data from "+boardNameList[index]+" ...") _data = loadData(dataDir+filenameList[index]) print("number of articles : "+str(len(_data))) print("Cost time : "+str(time.time()-_start)+" secs") _start = time.time() print("Building user dict...") boardName = boardNameList[index] userDict = buildUserDict(userDict, _data, boardName) print("Total user number : "+str(len(userDict.keys()))) print("Cost time : "+str(time.time()-_start)+" secs") _start = time.time() print("Extract user features...") printFeature2File(userDict, featureFileOut) print("Cost time : "+str(time.time()-_start)+" secs") print("Total cost time : "+str(time.time()-total_start)+" secs") _start = time.time() # for dd in _data: # print("=====================================") # print(dd['b_作者'].split(" ")[0]) # print(dd['h_推文總數']['b']) # print(dd['h_推文總數']['g']) # print(dd['h_推文總數']['all']) # res = dd['g_推文'] # goodResList = list() # BooResList = list() # neutralResList = list() # for rr in res: # if res[rr]['狀態'] == u'噓 ': # BooResList.append(res[rr]['留言者']) # elif res[rr]['狀態'] == u'推 ': # goodResList.append(res[rr]['留言者']) # else: # neutralResList.append(res[rr]['留言者']) # print("噓"+str(BooResList)) # print("推"+str(goodResList)) # print("->"+str(neutralResList)) # print(_data[0]['c_標題']) # print(_data[0]['h_推文總數']) # print(_data[0]['g_推文'])
normal
{ "blob_id": "306240db8a1652fe7cd79808c40e4354c3158d3e", "index": 3434, "step-1": "<mask token>\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n 
userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\nif __name__ == '__main__':\n featureFileOut = str(sys.argv[1])\n dataDir = '../data/'\n filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',\n 'data-Elephants-3500-2017-06-29-03-30-22.json',\n 'data-Monkeys-3500-2017-06-29-03-31-55.json',\n 'data-Guardians-3500-2017-06-29-04-12-43.json',\n 'data-Lions-3300-2017-06-29-04-11-50.json']\n total_start = time.time()\n _start = time.time()\n userDict = dict()\n for index in range(len(filenameList)):\n print('Loading data from ' + boardNameList[index] + ' ...')\n _data = loadData(dataDir + filenameList[index])\n print('number of articles : ' + str(len(_data)))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Building user dict...')\n boardName = boardNameList[index]\n userDict = buildUserDict(userDict, _data, boardName)\n print('Total user number : ' + str(len(userDict.keys())))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Extract user features...')\n printFeature2File(userDict, featureFileOut)\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n print('Total cost time : ' + str(time.time() - total_start) + ' secs')\n _start = 
time.time()\n", "step-3": "<mask token>\nboardNameList = ['Baseball', 'Elephants', 'Monkeys', 'Lions', 'Guardians']\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\nif __name__ == '__main__':\n featureFileOut = str(sys.argv[1])\n dataDir = '../data/'\n filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',\n 'data-Elephants-3500-2017-06-29-03-30-22.json',\n 'data-Monkeys-3500-2017-06-29-03-31-55.json',\n 'data-Guardians-3500-2017-06-29-04-12-43.json',\n 'data-Lions-3300-2017-06-29-04-11-50.json']\n total_start = time.time()\n _start = time.time()\n userDict = dict()\n for index in range(len(filenameList)):\n print('Loading data from ' + boardNameList[index] + ' ...')\n _data = loadData(dataDir + 
filenameList[index])\n print('number of articles : ' + str(len(_data)))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Building user dict...')\n boardName = boardNameList[index]\n userDict = buildUserDict(userDict, _data, boardName)\n print('Total user number : ' + str(len(userDict.keys())))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Extract user features...')\n printFeature2File(userDict, featureFileOut)\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n print('Total cost time : ' + str(time.time() - total_start) + ' secs')\n _start = time.time()\n", "step-4": "import json\nimport sys\nimport time\nboardNameList = ['Baseball', 'Elephants', 'Monkeys', 'Lions', 'Guardians']\n\n\ndef loadData(filename):\n _data = json.loads(open(filename).read())\n return _data\n\n\ndef buildUserDict(userDict, _data, boardName):\n for article in _data:\n _user = article['b_作者'].split(' ')[0]\n if not _user in userDict:\n userDict[_user] = dict()\n if not boardName in userDict[_user]:\n userDict[_user][boardName] = {'article': 0, 'article_g': 0,\n 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n userDict[_user][boardName]['article'] += 1\n userDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n userDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n userDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n responses = article['g_推文']\n for res in responses:\n resUser = responses[res]['留言者']\n if not resUser in userDict:\n userDict[resUser] = dict()\n if not boardName in userDict[resUser]:\n userDict[resUser][boardName] = {'article': 0, 'article_g': \n 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0}\n if responses[res]['狀態'] == u'噓 ':\n userDict[resUser][boardName]['b'] += 1\n elif responses[res]['狀態'] == u'推 ':\n userDict[resUser][boardName]['g'] += 1\n else:\n userDict[resUser][boardName]['n'] += 1\n return 
userDict\n\n\ndef printFeature2File(userDict, filename):\n _file = open(filename, 'w')\n json.dump(userDict, _file)\n _file.close()\n\n\nif __name__ == '__main__':\n featureFileOut = str(sys.argv[1])\n dataDir = '../data/'\n filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json',\n 'data-Elephants-3500-2017-06-29-03-30-22.json',\n 'data-Monkeys-3500-2017-06-29-03-31-55.json',\n 'data-Guardians-3500-2017-06-29-04-12-43.json',\n 'data-Lions-3300-2017-06-29-04-11-50.json']\n total_start = time.time()\n _start = time.time()\n userDict = dict()\n for index in range(len(filenameList)):\n print('Loading data from ' + boardNameList[index] + ' ...')\n _data = loadData(dataDir + filenameList[index])\n print('number of articles : ' + str(len(_data)))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Building user dict...')\n boardName = boardNameList[index]\n userDict = buildUserDict(userDict, _data, boardName)\n print('Total user number : ' + str(len(userDict.keys())))\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n _start = time.time()\n print('Extract user features...')\n printFeature2File(userDict, featureFileOut)\n print('Cost time : ' + str(time.time() - _start) + ' secs')\n print('Total cost time : ' + str(time.time() - total_start) + ' secs')\n _start = time.time()\n", "step-5": "import json\nimport sys\nimport time\n# boardName\tpageNum\tindexNewest\n# Baseball\t5000\t5183\n# Elephants\t3500\t3558\n# Monkeys\t3500\t3672\n# Lions\t3300\t3381\n# Guardians\t3500\t3542\n\nboardNameList = [\"Baseball\", \"Elephants\", \"Monkeys\", \"Lions\", \"Guardians\"]\ndef loadData(filename):\n\t_data = json.loads(open(filename).read())\n\treturn _data\n\ndef buildUserDict(userDict, _data, boardName):\n\t#各版發文數\t發文總推數\t發文總噓數\t發文總->數\t各版推文數\t各板噓文數\t各版->數\n\t#article\tarticle_g\tarticle_b\tarticle_n\tg \t\t\tb \t\t\tn \t\t\t\n\t# userDict = dict()\n\tfor article in _data:\n\t\t_user = article['b_作者'].split(\" \")[0] 
\n\t\tif not _user in userDict:\n\t\t\tuserDict[_user] = dict()\n\t\tif not boardName in userDict[_user]:\n\t\t\tuserDict[_user][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0}\n\t\t\n\t\tuserDict[_user][boardName]['article'] += 1\n\t\tuserDict[_user][boardName]['article_g'] += article['h_推文總數']['g']\n\t\tuserDict[_user][boardName]['article_b'] += article['h_推文總數']['b']\n\t\tuserDict[_user][boardName]['article_n'] += article['h_推文總數']['n']\n\t\tresponses = article['g_推文']\n\t\tfor res in responses:\n\t\t\tresUser = responses[res]['留言者']\n\t\t\tif not resUser in userDict:\n\t\t\t\tuserDict[resUser] = dict()\n\t\t\tif not boardName in userDict[resUser]:\n\t\t\t\tuserDict[resUser][boardName] = {'article':0,'article_g':0,'article_b':0,'article_n':0,'g':0,'b':0,'n':0}\n\n\t\t\tif responses[res]['狀態'] == u'噓 ':\n\t\t\t\tuserDict[resUser][boardName]['b'] += 1\n\t\t\telif responses[res]['狀態'] == u'推 ':\n\t\t\t\tuserDict[resUser][boardName]['g'] += 1\n\t\t\telse:\n\t\t\t\tuserDict[resUser][boardName]['n'] += 1\n\treturn userDict\ndef printFeature2File(userDict, filename):\n\t_file = open(filename, \"w\")\n\tjson.dump(userDict,_file)\n\t_file.close()\n\nif __name__ == \"__main__\": \n\t# filename = str(sys.argv[1])\n\tfeatureFileOut = str(sys.argv[1])\n\tdataDir = \"../data/\"\n\tfilenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json','data-Elephants-3500-2017-06-29-03-30-22.json',\n\t\t\t\t\t'data-Monkeys-3500-2017-06-29-03-31-55.json','data-Guardians-3500-2017-06-29-04-12-43.json',\n\t\t\t\t\t'data-Lions-3300-2017-06-29-04-11-50.json']\n\t#python3 extractFeatures.py ../data/userFeatureTest.json\n\ttotal_start = time.time()\n\t_start = time.time()\n\tuserDict = dict()\n\tfor index in range(len(filenameList)):\n\t\tprint(\"Loading data from \"+boardNameList[index]+\" ...\")\n\t\t_data = loadData(dataDir+filenameList[index])\n\t\tprint(\"number of articles : \"+str(len(_data)))\n\t\tprint(\"Cost time : \"+str(time.time()-_start)+\" 
secs\")\n\t\t_start = time.time()\n\n\t\tprint(\"Building user dict...\")\n\t\tboardName = boardNameList[index]\n\t\tuserDict = buildUserDict(userDict, _data, boardName)\n\t\tprint(\"Total user number : \"+str(len(userDict.keys())))\n\t\tprint(\"Cost time : \"+str(time.time()-_start)+\" secs\")\n\t\t_start = time.time()\n\n\tprint(\"Extract user features...\")\n\tprintFeature2File(userDict, featureFileOut)\n\tprint(\"Cost time : \"+str(time.time()-_start)+\" secs\")\n\tprint(\"Total cost time : \"+str(time.time()-total_start)+\" secs\")\n\t_start = time.time()\n\t\n\t# for dd in _data:\n\t# \tprint(\"=====================================\")\n\t# \tprint(dd['b_作者'].split(\" \")[0])\n\t# \tprint(dd['h_推文總數']['b'])\n\t# \tprint(dd['h_推文總數']['g'])\n\t# \tprint(dd['h_推文總數']['all'])\n\t# \tres = dd['g_推文']\n\t# \tgoodResList = list()\n\t# \tBooResList = list()\n\t# \tneutralResList = list()\n\t# \tfor rr in res:\n\t# \t\tif res[rr]['狀態'] == u'噓 ':\n\t# \t\t\tBooResList.append(res[rr]['留言者'])\n\t# \t\telif res[rr]['狀態'] == u'推 ':\n\t# \t\t\tgoodResList.append(res[rr]['留言者'])\n\t# \t\telse:\n\t# \t\t\tneutralResList.append(res[rr]['留言者'])\n\t# \tprint(\"噓\"+str(BooResList))\n\t# \tprint(\"推\"+str(goodResList))\n\t# \tprint(\"->\"+str(neutralResList))\n\t# print(_data[0]['c_標題'])\n\t# print(_data[0]['h_推文總數'])\n\t# print(_data[0]['g_推文'])", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from django.db import models #Precisa existir uma conversao ticker -> ticker_id mais facil, ou definir como trabalhar com o ticker.name, #na maioria dos casos só tenho o nome do ticker, nao o id. class User(models.Model): """ Usuario que pode operar ativos """ name = models.CharField(max_length=200) saldo = models.DecimalField(max_digits=12, decimal_places=3) def __unicode__(self): return "User(%s, %.3f)" %(self.name, self.saldo) class Stock(models.Model): """ Representa um ativo """ ticker = models.CharField(max_length=8, unique=True) name = models.CharField(max_length=200) def __unicode__(self): return "Stock(%s, %s)" %(self.ticker, self.name) class Order(models.Model): """ Ordem criada por um usuario, que vai para o book de um dado ativo """ ORDER_BUY = 'C' ORDER_SELL = 'V' STATUS_NEW = 'N' STATUS_OPEN = 'A' STATUS_PARTIAL = 'P' STATUS_FINALIZED = 'F' STATUS_CANCELLED = 'C' ORDER_TYPES = [ (ORDER_BUY, "Compra"), (ORDER_SELL, "Venda") ] ORDER_STATUS = [ (STATUS_NEW, "Nova"), (STATUS_OPEN, "Aberta"), (STATUS_PARTIAL, "Parcialmente Executada"), (STATUS_FINALIZED, "Finalizada"), (STATUS_CANCELLED, "Cancelada") ] user = models.ForeignKey(User) stock = models.ForeignKey(Stock) tipo = models.CharField(max_length=1, choices=ORDER_TYPES) status = models.CharField(max_length=1, choices=ORDER_STATUS, default=STATUS_NEW) original_qty = models.IntegerField() qty = models.IntegerField() value = models.DecimalField(max_digits=6, decimal_places=3) included = models.DateTimeField(auto_now_add = True) cancel_reason = models.CharField(max_length=255) def __unicode__(self): return "Order(%c, %d, %s, %s, %s | %s)" %(self.tipo, self.qty, self.stock.ticker, self.value, self.user.name, self.status) class PortfolioItem(models.Model): """ Representa um ativo em uma custódia """ user = models.ForeignKey(User) stock = models.ForeignKey(Stock) qty = models.IntegerField() def __unicode__(self): return "PortfolioItem(%s, %s, %d)" %(self.user.name, self.stock.ticker, self.qty) class 
Historical(models.Model): """ Registra uma negociacao efetuada """ stock = models.ForeignKey(Stock) qty = models.IntegerField() value = models.DecimalField(max_digits=6, decimal_places=3) user_buy = models.ForeignKey(User, related_name='buy_historical') user_sell = models.ForeignKey(User, related_name='sell_historical') timestamp = models.DateTimeField(auto_now_add = True) def __unicode__(self): return "Historical(%s, %d, %s, %s, %s)" %\ (self.stock.ticker, self.qty, self.value, self.user_buy.name, self.user_sell.name)
normal
{ "blob_id": "13e7484a80e4e45ee911f15837b9d82a1ef4d0b1", "index": 7259, "step-1": "from django.db import models\r\n\r\n#Precisa existir uma conversao ticker -> ticker_id mais facil, ou definir como trabalhar com o ticker.name,\r\n#na maioria dos casos só tenho o nome do ticker, nao o id.\r\n\r\nclass User(models.Model):\r\n \"\"\" Usuario que pode operar ativos \"\"\"\r\n name = models.CharField(max_length=200)\r\n saldo = models.DecimalField(max_digits=12, decimal_places=3)\r\n \r\n def __unicode__(self):\r\n return \"User(%s, %.3f)\" %(self.name, self.saldo)\r\n\r\nclass Stock(models.Model):\r\n \"\"\" Representa um ativo \"\"\"\r\n ticker = models.CharField(max_length=8, unique=True)\r\n name = models.CharField(max_length=200)\r\n \r\n def __unicode__(self):\r\n return \"Stock(%s, %s)\" %(self.ticker, self.name)\r\n\r\nclass Order(models.Model):\r\n \"\"\" Ordem criada por um usuario, que vai para o book de um dado ativo \"\"\"\r\n ORDER_BUY = 'C'\r\n ORDER_SELL = 'V'\r\n \r\n STATUS_NEW = 'N'\r\n STATUS_OPEN = 'A'\r\n STATUS_PARTIAL = 'P'\r\n STATUS_FINALIZED = 'F'\r\n STATUS_CANCELLED = 'C'\r\n \r\n ORDER_TYPES = [ \r\n (ORDER_BUY, \"Compra\"),\r\n (ORDER_SELL, \"Venda\") ]\r\n ORDER_STATUS = [\r\n (STATUS_NEW, \"Nova\"),\r\n (STATUS_OPEN, \"Aberta\"),\r\n (STATUS_PARTIAL, \"Parcialmente Executada\"),\r\n (STATUS_FINALIZED, \"Finalizada\"),\r\n (STATUS_CANCELLED, \"Cancelada\") ]\r\n \r\n user = models.ForeignKey(User)\r\n stock = models.ForeignKey(Stock)\r\n \r\n tipo = models.CharField(max_length=1, choices=ORDER_TYPES) \r\n status = models.CharField(max_length=1, choices=ORDER_STATUS, default=STATUS_NEW) \r\n original_qty = models.IntegerField()\r\n qty = models.IntegerField()\r\n value = models.DecimalField(max_digits=6, decimal_places=3)\r\n included = models.DateTimeField(auto_now_add = True)\r\n \r\n cancel_reason = models.CharField(max_length=255)\r\n \r\n def __unicode__(self):\r\n return \"Order(%c, %d, %s, %s, %s | %s)\" %(self.tipo, self.qty, 
self.stock.ticker, self.value, self.user.name, self.status)\r\n\r\nclass PortfolioItem(models.Model):\r\n \"\"\" Representa um ativo em uma custódia \"\"\"\r\n user = models.ForeignKey(User)\r\n stock = models.ForeignKey(Stock)\r\n qty = models.IntegerField()\r\n \r\n def __unicode__(self):\r\n return \"PortfolioItem(%s, %s, %d)\" %(self.user.name, self.stock.ticker, self.qty)\r\n \r\nclass Historical(models.Model):\r\n \"\"\" Registra uma negociacao efetuada \"\"\"\r\n stock = models.ForeignKey(Stock)\r\n qty = models.IntegerField()\r\n value = models.DecimalField(max_digits=6, decimal_places=3)\r\n user_buy = models.ForeignKey(User, related_name='buy_historical')\r\n user_sell = models.ForeignKey(User, related_name='sell_historical')\r\n timestamp = models.DateTimeField(auto_now_add = True)\r\n \r\n def __unicode__(self):\r\n return \"Historical(%s, %d, %s, %s, %s)\" %\\\r\n (self.stock.ticker, self.qty, self.value, self.user_buy.name, self.user_sell.name)\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from django.db import models from django.template.defaultfilters import slugify # Create your models here. class SlugStampMixin(object): ''' An Worflow is an ordered collection of a Protocols ''' def save(self, *args, **kwargs): super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name. new_slug = self.generate_slug() if not new_slug == self.slug: # Triggered when its a clone method self.slug = new_slug super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name. def generate_slug(self): slug = slugify(self.name) if self.pk: return "%d-%s" % (self.pk, slug) else: return slug
normal
{ "blob_id": "c30f11e9bac54771df5198971c312624f68d0a33", "index": 4259, "step-1": "<mask token>\n\n\nclass SlugStampMixin(object):\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass SlugStampMixin(object):\n <mask token>\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs)\n new_slug = self.generate_slug()\n if not new_slug == self.slug:\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs)\n\n def generate_slug(self):\n slug = slugify(self.name)\n if self.pk:\n return '%d-%s' % (self.pk, slug)\n else:\n return slug\n", "step-3": "<mask token>\n\n\nclass SlugStampMixin(object):\n \"\"\"\n An Worflow is an ordered collection of a Protocols\n \"\"\"\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs)\n new_slug = self.generate_slug()\n if not new_slug == self.slug:\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs)\n\n def generate_slug(self):\n slug = slugify(self.name)\n if self.pk:\n return '%d-%s' % (self.pk, slug)\n else:\n return slug\n", "step-4": "from django.db import models\nfrom django.template.defaultfilters import slugify\n\n\nclass SlugStampMixin(object):\n \"\"\"\n An Worflow is an ordered collection of a Protocols\n \"\"\"\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs)\n new_slug = self.generate_slug()\n if not new_slug == self.slug:\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs)\n\n def generate_slug(self):\n slug = slugify(self.name)\n if self.pk:\n return '%d-%s' % (self.pk, slug)\n else:\n return slug\n", "step-5": "from django.db import models\nfrom django.template.defaultfilters import slugify\n\n# Create your models here.\n\nclass SlugStampMixin(object):\n '''\n An Worflow is an ordered collection of a Protocols\n '''\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to 
be changed to handle giving it a new name.\n \n new_slug = self.generate_slug()\n\n if not new_slug == self.slug: # Triggered when its a clone method\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.\n\n\n def generate_slug(self):\n slug = slugify(self.name)\n\n if self.pk:\n return \"%d-%s\" % (self.pk, slug)\n else:\n return slug\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
#!/usr/bin/python3 def file_to_code(fname): mem = [] for line in open(fname,"r"): mem.extend([int(i) for i in line.split(",")]) return mem class Opcode(object): def __init__(self, mem, ptr, code, inc): """ >>> o = Opcode([1001, 2, 4, 1], 0, 1, 4) >>> o._Opcode__par_modes [0, 1] """ if mem[ptr]%100 != code: raise Exception("Creating Opcode%d for opcode %d"%(code, mem[ptr])) self.memory = mem self.ptr = ptr self.__par_modes = list(reversed([int(i) for i in str(int(mem[ptr]/100))])) self.__ptr_inc = inc def ptr_inc(self): return self.__ptr_inc def get_val(self, arg_idx): """ >>> o = Opcode([1001, 2, 4, 1], 0, 1, 4) >>> o.get_val(1) 4 >>> o.get_val(2) 4 >>> o.get_val(3) 2 """ idx = arg_idx-1 if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0: return self.memory[self.memory[self.ptr+arg_idx]] elif self.__par_modes[idx] == 1: return self.memory[self.ptr + arg_idx] def set_ptr(self): return False,0 def reads(self): raise Exception("Call to base class reads()") def writes(self): raise Exception("Call to base class writes()") def op(self): raise Exception("Call to base class op()") def params(self): raise Exception("Call to base class params()") def run(self): raise Exception("Call to base class run()") class Opcode1(Opcode): """ >>> o = Opcode1([101, 2, 1, 3], 0) >>> o.run() True >>> o.memory [101, 2, 1, 4] """ def __init__(self, mem, ptr): super().__init__(mem, ptr, 1, 4) self.__first = self.get_val(1) self.__second = self.get_val(2) self.__res = mem[ptr+3] def run(self): self.memory[self.__res] = self.__first + self.__second return True def params(self): return {'noun':self.__first, 'verb':self.__second, 'result':self.__res} def reads(self): return [self.__first, self.__second] def writes(self): return self.__res def op(self): return "+" def __str__(self): return "loc[%d] = %d + %d"%(self.__res,self.__first,self.__second) class Opcode2(Opcode): """ >>> o = Opcode2([2, 2, 3, 4, 99], 0) >>> o.run() True >>> o.memory [2, 2, 3, 4, 12] """ def __init__(self, mem, 
ptr): super().__init__(mem, ptr, 2, 4) self.__first = self.get_val(1) self.__second = self.get_val(2) self.__res = mem[ptr+3] def run(self): self.memory[self.__res] = self.__first * self.__second return True def params(self): return {'noun':self.__first, 'verb':self.__second, 'result':self.__res} def reads(self): return [self.__first, self.__second] def writes(self): return self.__res def op(self): return "*" def __str__(self): return "loc[%d] = %d * %d"%(self.__res,self.__first,self.__second) class Opcode99(Opcode): """ >>> o = Opcode99([99,12,3,4,5], 0) >>> o.run() False """ def __init__(self, mem, ptr): super().__init__(mem, ptr, 99, 1) def run(self): return False def params(self): return {} def reads(self): return [] def writes(self): return None def op(self): return "HALT" def __str__(self): return "HALT" def default_ops(): return {1:Opcode1,2:Opcode2,99:Opcode99} class Interpreter(object): def __init__(self, input_code, ops=default_ops()): self.__memory = input_code self.__ops = ops self.__ptr = 0 self.__running = True self.length = len(self.__memory) def stepi(self): o = None if self.__running: o = self.next_op() self.__running = o.run() chk,val = o.set_ptr() if chk: self.__ptr = val else: self.__ptr += o.ptr_inc() return o def run(self): while self.__running: self.stepi() def inspect(self,loc): return self.__memory[loc] def next_op(self): return self.op_at(self.__ptr) def op_at(self, ptr): return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr) def __str__(self): strs = [] for i,v in enumerate(self.__memory): if i == self.__ptr: strs.append("{:*>4}".format(v)) else: strs.append("{:>4}".format(v)) return ",".join(strs) + "\n" + "Next:\n\t" + str(self.next_op()) def poke(self,loc,val): self.__memory[loc] = val def rebind(self,code,call): self.__ops[code] = call def as_opcodes(self): ops = [self.op_at(0)] ptr = ops[-1].ptr_inc() while ops[-1].op() != "HALT": ops.append(self.op_at(ptr)) ptr += ops[-1].ptr_inc() return ops class ValueNode(object): def 
__init__(self,val,tag=''): self.__val = val self.__tag = tag def __str__(self): return self.__tag + str(self.__val) class OpNode(object): def __init__(self,op,depends): self.__op = op self.__depends = depends def __str__(self): return "(" + self.__op.op().join([str(i) for i in self.__depends]) + ")" class OpcodeTreeBuilder(object): def __init__(self, interp): self.__interpreter = interp self.__codes = interp.as_opcodes() def construct_mappings(self): for i in self.__codes: params = i.params() if 'result' in params.keys(): if params['result'] not in self.__writes_to.keys(): self.__writes_to[params['result']] = [] self.__writes_to[params['result']].append(i) if 'noun' in params.keys(): if params['noun'] not in self.__reads_from.keys(): self.__reads_from[params['noun']] = [] self.__reads_from[params['noun']].append(i) if 'verb' in params.keys(): if params['verb'] not in self.__reads_from.keys(): self.__reads_from[params['verb']] = [] self.__reads_from[params['verb']].append(i) def construct_graph(self): op = self.__interpreter.op_at(0) reads = [ValueNode(self.__interpreter.inspect(i),tag="raw%d_"%(i)) for i in op.reads()] writes = op.writes() base = OpNode(op,reads) ptr = op.ptr_inc() last_write = {} if writes: last_write[writes] = base while op.op() != "HALT": op = self.__interpreter.op_at(ptr) if op.op() == "HALT": break depends = [] for i in op.reads(): if i in last_write.keys(): depends.append(last_write[i]) else: depends.append(ValueNode(self.__interpreter.inspect(i))) base = OpNode(op,depends) if op.writes(): last_write[op.writes()] = base ptr += op.ptr_inc() return base if __name__=='__main__': import doctest doctest.testmod() ################################################# # i = Interpreter(file_to_code("day2_input.txt")) # i.run() # i.inspect(0)
normal
{ "blob_id": "653e65281984ebb06467aeadb6f0e2b11f1bcb4d", "index": 496, "step-1": "<mask token>\n\n\nclass Opcode1(Opcode):\n <mask token>\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def op(self):\n return '+'\n <mask token>\n\n\nclass Opcode2(Opcode):\n \"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def __str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n 
self.__ptr += o.ptr_inc()\n return o\n\n def run(self):\n while self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def __str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = 
self.__interpreter.op_at(0)\n reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Opcode1(Opcode):\n <mask token>\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n <mask token>\n <mask token>\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '+'\n <mask token>\n\n\nclass Opcode2(Opcode):\n \"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 
1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def __str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n self.__ptr += o.ptr_inc()\n return o\n\n def run(self):\n while self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def __str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in 
self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = self.__interpreter.op_at(0)\n reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Opcode(object):\n <mask token>\n\n def ptr_inc(self):\n return self.__ptr_inc\n\n def get_val(self, arg_idx):\n \"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o.get_val(1)\n\t\t4\n\t\t>>> o.get_val(2)\n\t\t4\n\t\t>>> o.get_val(3)\n\t\t2\n\t\t\"\"\"\n idx = arg_idx - 1\n if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n return self.memory[self.memory[self.ptr + arg_idx]]\n elif self.__par_modes[idx] == 1:\n return self.memory[self.ptr + arg_idx]\n\n def set_ptr(self):\n return False, 0\n\n def reads(self):\n raise Exception('Call to base class reads()')\n <mask token>\n <mask token>\n\n def params(self):\n raise Exception('Call to base class params()')\n\n def run(self):\n raise 
Exception('Call to base class run()')\n\n\nclass Opcode1(Opcode):\n \"\"\"\n\t>>> o = Opcode1([101, 2, 1, 3], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[101, 2, 1, 4]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '+'\n\n def __str__(self):\n return 'loc[%d] = %d + %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode2(Opcode):\n \"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def __str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n 
self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n self.__ptr += o.ptr_inc()\n return o\n\n def run(self):\n while self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def __str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n 
self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = self.__interpreter.op_at(0)\n reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Opcode(object):\n\n def __init__(self, mem, ptr, code, inc):\n \"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o._Opcode__par_modes\n\t\t[0, 1]\n\t\t\"\"\"\n if mem[ptr] % 100 != code:\n raise Exception('Creating Opcode%d for opcode %d' % (code, mem[\n ptr]))\n self.memory = mem\n self.ptr = ptr\n self.__par_modes = list(reversed([int(i) for i in str(int(mem[ptr] /\n 100))]))\n self.__ptr_inc = inc\n\n def ptr_inc(self):\n return self.__ptr_inc\n\n def get_val(self, arg_idx):\n \"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o.get_val(1)\n\t\t4\n\t\t>>> o.get_val(2)\n\t\t4\n\t\t>>> o.get_val(3)\n\t\t2\n\t\t\"\"\"\n idx = arg_idx - 1\n if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n return self.memory[self.memory[self.ptr + arg_idx]]\n elif self.__par_modes[idx] == 1:\n return self.memory[self.ptr + arg_idx]\n\n def set_ptr(self):\n return False, 0\n\n def reads(self):\n raise Exception('Call to base class reads()')\n\n def writes(self):\n raise Exception('Call 
to base class writes()')\n <mask token>\n\n def params(self):\n raise Exception('Call to base class params()')\n\n def run(self):\n raise Exception('Call to base class run()')\n\n\nclass Opcode1(Opcode):\n \"\"\"\n\t>>> o = Opcode1([101, 2, 1, 3], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[101, 2, 1, 4]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '+'\n\n def __str__(self):\n return 'loc[%d] = %d + %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode2(Opcode):\n \"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def 
__str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n self.__ptr += o.ptr_inc()\n return o\n\n def run(self):\n while self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def __str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n 
self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = self.__interpreter.op_at(0)\n reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n", "step-5": "#!/usr/bin/python3\n\ndef file_to_code(fname):\n\tmem = []\n\tfor line in open(fname,\"r\"):\n\t\tmem.extend([int(i) for i in line.split(\",\")])\n\treturn mem\n\nclass Opcode(object):\n\tdef __init__(self, mem, ptr, code, inc):\n\t\t\"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o._Opcode__par_modes\n\t\t[0, 1]\n\t\t\"\"\"\n\t\tif mem[ptr]%100 != code:\n\t\t\traise Exception(\"Creating Opcode%d for opcode %d\"%(code, mem[ptr]))\n\t\tself.memory = mem\n\t\tself.ptr = ptr\n\t\tself.__par_modes = list(reversed([int(i) for i in str(int(mem[ptr]/100))]))\n\t\tself.__ptr_inc = inc\n\n\tdef ptr_inc(self):\n\t\treturn self.__ptr_inc\n\n\tdef get_val(self, arg_idx):\n\t\t\"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o.get_val(1)\n\t\t4\n\t\t>>> o.get_val(2)\n\t\t4\n\t\t>>> o.get_val(3)\n\t\t2\n\t\t\"\"\"\n\t\tidx = arg_idx-1\n\t\tif idx >= len(self.__par_modes) or 
self.__par_modes[idx] == 0:\n\t\t\treturn self.memory[self.memory[self.ptr+arg_idx]]\n\t\telif self.__par_modes[idx] == 1:\n\t\t\treturn self.memory[self.ptr + arg_idx]\n\n\tdef set_ptr(self):\n\t\treturn False,0\n\n\tdef reads(self):\n\t\traise Exception(\"Call to base class reads()\")\n\n\tdef writes(self):\n\t\traise Exception(\"Call to base class writes()\")\n\n\tdef op(self):\n\t\traise Exception(\"Call to base class op()\")\n\n\tdef params(self):\n\t\traise Exception(\"Call to base class params()\")\n\n\tdef run(self):\n\t\traise Exception(\"Call to base class run()\")\n\n\nclass Opcode1(Opcode):\n\t\"\"\"\n\t>>> o = Opcode1([101, 2, 1, 3], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[101, 2, 1, 4]\n\t\"\"\"\n\tdef __init__(self, mem, ptr):\n\t\tsuper().__init__(mem, ptr, 1, 4)\n\t\tself.__first = self.get_val(1)\n\t\tself.__second = self.get_val(2)\n\t\tself.__res = mem[ptr+3]\n\n\tdef run(self):\n\t\tself.memory[self.__res] = self.__first + self.__second\n\t\treturn True\n\n\tdef params(self):\n\t\treturn {'noun':self.__first, 'verb':self.__second, 'result':self.__res}\n\n\tdef reads(self):\n\t\treturn [self.__first, self.__second]\n\n\tdef writes(self):\n\t\treturn self.__res\n\n\tdef op(self):\n\t\treturn \"+\"\n\n\tdef __str__(self):\n\t\treturn \"loc[%d] = %d + %d\"%(self.__res,self.__first,self.__second)\n\nclass Opcode2(Opcode):\n\t\"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\tdef __init__(self, mem, ptr):\n\t\tsuper().__init__(mem, ptr, 2, 4)\n\t\tself.__first = self.get_val(1)\n\t\tself.__second = self.get_val(2)\n\t\tself.__res = mem[ptr+3]\n\n\tdef run(self):\n\t\tself.memory[self.__res] = self.__first * self.__second\n\t\treturn True\n\n\tdef params(self):\n\t\treturn {'noun':self.__first, 'verb':self.__second, 'result':self.__res}\n\n\tdef reads(self):\n\t\treturn [self.__first, self.__second]\n\n\tdef writes(self):\n\t\treturn self.__res\n\n\tdef op(self):\n\t\treturn 
\"*\"\n\n\tdef __str__(self):\n\t\treturn \"loc[%d] = %d * %d\"%(self.__res,self.__first,self.__second)\n\nclass Opcode99(Opcode):\n\t\"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\tdef __init__(self, mem, ptr):\n\t\tsuper().__init__(mem, ptr, 99, 1)\n\n\tdef run(self):\n\t\treturn False\n\n\tdef params(self):\n\t\treturn {}\n\n\tdef reads(self):\n\t\treturn []\n\n\tdef writes(self):\n\t\treturn None\n\n\tdef op(self):\n\t\treturn \"HALT\"\n\n\tdef __str__(self):\n\t\treturn \"HALT\"\n\ndef default_ops():\n\treturn {1:Opcode1,2:Opcode2,99:Opcode99}\n\nclass Interpreter(object):\n\tdef __init__(self, input_code, ops=default_ops()):\n\t\tself.__memory = input_code\n\n\t\tself.__ops = ops\n\t\tself.__ptr = 0\n\t\tself.__running = True\n\t\tself.length = len(self.__memory)\n\n\tdef stepi(self):\n\t\to = None\n\t\tif self.__running:\n\t\t\to = self.next_op()\n\t\t\tself.__running = o.run()\n\t\t\tchk,val = o.set_ptr()\n\t\t\tif chk:\n\t\t\t\tself.__ptr = val\n\t\t\telse:\n\t\t\t\tself.__ptr += o.ptr_inc()\n\t\treturn o\n\n\tdef run(self):\n\t\twhile self.__running:\n\t\t\tself.stepi()\n\n\tdef inspect(self,loc):\n\t\treturn self.__memory[loc]\n\n\tdef next_op(self):\n\t\treturn self.op_at(self.__ptr)\n\n\tdef op_at(self, ptr):\n\t\treturn self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n\tdef __str__(self):\n\t\tstrs = []\n\t\tfor i,v in enumerate(self.__memory):\n\t\t\tif i == self.__ptr:\n\t\t\t\tstrs.append(\"{:*>4}\".format(v))\n\t\t\telse:\n\t\t\t\tstrs.append(\"{:>4}\".format(v))\n\t\treturn \",\".join(strs) + \"\\n\" + \"Next:\\n\\t\" + str(self.next_op())\n\n\tdef poke(self,loc,val):\n\t\tself.__memory[loc] = val\n\n\tdef rebind(self,code,call):\n\t\tself.__ops[code] = call\n\n\tdef as_opcodes(self):\n\t\tops = [self.op_at(0)]\n\t\tptr = ops[-1].ptr_inc()\n\t\twhile ops[-1].op() != \"HALT\":\n\t\t\tops.append(self.op_at(ptr))\n\t\t\tptr += ops[-1].ptr_inc()\n\t\treturn ops\n\nclass ValueNode(object):\n\tdef 
__init__(self,val,tag=''):\n\t\tself.__val = val\n\t\tself.__tag = tag\n\n\tdef __str__(self):\n\t\treturn self.__tag + str(self.__val)\n\nclass OpNode(object):\n\tdef __init__(self,op,depends):\n\t\tself.__op = op\n\t\tself.__depends = depends\n\n\tdef __str__(self):\n\t\treturn \"(\" + self.__op.op().join([str(i) for i in self.__depends]) + \")\"\n\nclass OpcodeTreeBuilder(object):\n\tdef __init__(self, interp):\n\t\tself.__interpreter = interp\n\t\tself.__codes = interp.as_opcodes()\n\n\tdef construct_mappings(self):\n\t\tfor i in self.__codes:\n\t\t\tparams = i.params()\n\t\t\tif 'result' in params.keys():\n\t\t\t\tif params['result'] not in self.__writes_to.keys():\n\t\t\t\t\tself.__writes_to[params['result']] = []\n\t\t\t\tself.__writes_to[params['result']].append(i)\n\t\t\tif 'noun' in params.keys():\n\t\t\t\tif params['noun'] not in self.__reads_from.keys():\n\t\t\t\t\tself.__reads_from[params['noun']] = []\n\t\t\t\tself.__reads_from[params['noun']].append(i)\n\t\t\tif 'verb' in params.keys():\n\t\t\t\tif params['verb'] not in self.__reads_from.keys():\n\t\t\t\t\tself.__reads_from[params['verb']] = []\n\t\t\t\tself.__reads_from[params['verb']].append(i)\n\n\tdef construct_graph(self):\n\t\top = self.__interpreter.op_at(0)\n\t\treads = [ValueNode(self.__interpreter.inspect(i),tag=\"raw%d_\"%(i)) for i in op.reads()]\n\t\twrites = op.writes()\n\t\tbase = OpNode(op,reads)\n\t\tptr = op.ptr_inc()\n\t\tlast_write = {}\n\t\tif writes:\n\t\t\tlast_write[writes] = base\n\t\twhile op.op() != \"HALT\":\n\t\t\top = self.__interpreter.op_at(ptr)\n\t\t\tif op.op() == \"HALT\":\n\t\t\t\tbreak\n\t\t\tdepends = []\n\t\t\tfor i in op.reads():\n\t\t\t\tif i in last_write.keys():\n\t\t\t\t\tdepends.append(last_write[i])\n\t\t\t\telse:\n\t\t\t\t\tdepends.append(ValueNode(self.__interpreter.inspect(i)))\n\t\t\tbase = OpNode(op,depends)\n\t\t\tif op.writes():\n\t\t\t\tlast_write[op.writes()] = base\n\t\t\tptr += op.ptr_inc()\n\t\treturn base\n\nif __name__=='__main__':\n\timport 
doctest\n\tdoctest.testmod()\n\n#################################################\n\n#\ti = Interpreter(file_to_code(\"day2_input.txt\"))\n#\ti.run()\n#\ti.inspect(0)\n", "step-ids": [ 43, 44, 55, 57, 62 ] }
[ 43, 44, 55, 57, 62 ]
# Tip Calculator # Dan Soloha # 9/12/2019 total = int(input("What was the total your bill came to? ")) print(f"With a total of {total}, you should tip ${int(total + (total * 0.15))}. If the waiter did a really good job, you should tip ${int(total + (total * 0.20))}. ") # Multiplying by 1.x was returning the number rounded down for some reason
normal
{ "blob_id": "45d5c75a993ff50e1a88510bdb16e963403c5356", "index": 8588, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(\n f'With a total of {total}, you should tip ${int(total + total * 0.15)}. If the waiter did a really good job, you should tip ${int(total + total * 0.2)}. '\n )\n", "step-3": "total = int(input('What was the total your bill came to? '))\nprint(\n f'With a total of {total}, you should tip ${int(total + total * 0.15)}. If the waiter did a really good job, you should tip ${int(total + total * 0.2)}. '\n )\n", "step-4": "# Tip Calculator\r\n# Dan Soloha\r\n# 9/12/2019\r\n\r\ntotal = int(input(\"What was the total your bill came to? \"))\r\nprint(f\"With a total of {total}, you should tip ${int(total + (total * 0.15))}. If the waiter did a really good job, you should tip ${int(total + (total * 0.20))}. \") # Multiplying by 1.x was returning the number rounded down for some reason", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# The actual code begins here # This file is intended to load everything downloaded from loaddata.py, preventing user getting banned from IMDB # The code is written to see what are some key words of the reviews from critics and normal viewers # And to see what are some of the differences # The second task is to asses the people's emotion vs. actual score given # First, we need to load back everything we dumped to folder via pickle. import pickle print('loading data...') with open('movienumbers.pickle','rb') as input_file: movienumbers = pickle.load(input_file) with open('ratings.pickle','rb') as input_file: ratings = pickle.load(input_file) with open('userratings.pickle','rb') as input_file: userratings = pickle.load(input_file) with open('metaratings.pickle','rb') as input_file: metaratings = pickle.load(input_file) print('Pickled data successfully loaded.') # then, it's time to use nltp to see the score of the critics vs. viewers on movies from nltk.sentiment.vader import SentimentIntensityAnalyzer # print(movienumbers) # print(ratings) # print(userratings) # print(metaratings) # Userratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]" # print(userratings['tt0111161']) # # print(metaratings['tt0111161']) # print(ratings['tt0111161']) userscore = {} for movieid, reviews in userratings.items(): score = 0 for eachreviews in reviews: score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound'] average = score / len(reviews) userscore[movieid] = average print(userscore) # Meta ratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]" criticsscore = {} for movieid, reviews in metaratings.items(): score_1 = 0 for eachreviews in reviews: score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound'] average = score_1 / len(reviews) criticsscore[movieid] = average print(criticsscore) # Question 1: Are critics always more positive than the audience? 
counter = 0 for movieid, score in userscore.items(): if movieid in criticsscore and criticsscore[movieid] > score: counter += 1 else: counter += 0 # Displaying results to question 1 print("Critics overpraise these movies " + str(counter) + " times more than normal viewers out of " + str(len(criticsscore)) + " movies in total.") if counter < (len(criticsscore) - counter): print("Because the critics overpraise less than half of the movies sampled here, the critics are more refrained " "than the users on IMDb.") else: print("Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained " "than the users on IMDb.") # Question 2: Is the IMDB score closer to the users' sentiment? Or the critics. useriscloser = 0 criticiscloser = 0 for movieid, score in criticsscore.items(): if abs(userscore[movieid] - (ratings[movieid])/10) > abs(score - (ratings[movieid]/10)): useriscloser += 1 else: criticiscloser += 1 # Displaying results to question 2 print("Critics are more closer to the ratings for " + str(criticiscloser) + " times, while normal viewers are closer " + str(useriscloser) + " times out of " + str(len(criticsscore)) + " movies in total.") if useriscloser > criticiscloser: print("Because the more movies have users resembling closer to the rating, the critics are less accurate " "than the users on IMDb.") else: print("Because the more movies have critics resembling closer to the rating, the users are less accurate " "than the users on IMDb.")
normal
{ "blob_id": "1f69cf5f6d15048e6ead37b5da836c9e2f783f74", "index": 803, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\n<mask token>\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\n<mask token>\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\n<mask token>\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\n<mask token>\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more 
closer to the ratings for ' + str(criticiscloser) +\n ' times, while normal viewers are closer ' + str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n", "step-3": "<mask token>\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\n<mask token>\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are 
more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to the ratings for ' + str(criticiscloser) +\n ' times, while normal viewers are closer ' + str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n", "step-4": "import pickle\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n 
criticsscore[movieid] = average\nprint(criticsscore)\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to the ratings for ' + str(criticiscloser) +\n ' times, while normal viewers are closer ' + str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n", "step-5": "# The actual code begins here\n# This file is intended to load everything downloaded from loaddata.py, preventing user getting banned from IMDB\n# The code is written to see what are some key words of the reviews from critics and normal viewers\n# And to see what are some of the differences\n# The second task is to asses the people's emotion vs. 
actual score given\n\n# First, we need to load back everything we dumped to folder via pickle.\n\nimport pickle\nprint('loading data...')\n\nwith open('movienumbers.pickle','rb') as input_file:\n movienumbers = pickle.load(input_file)\n\nwith open('ratings.pickle','rb') as input_file:\n ratings = pickle.load(input_file)\n\nwith open('userratings.pickle','rb') as input_file:\n userratings = pickle.load(input_file)\n\nwith open('metaratings.pickle','rb') as input_file:\n metaratings = pickle.load(input_file)\n\nprint('Pickled data successfully loaded.')\n\n# then, it's time to use nltp to see the score of the critics vs. viewers on movies\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n# print(movienumbers)\n# print(ratings)\n# print(userratings)\n# print(metaratings)\n\n# Userratings is a dictionary in ways like this \"ttxxxxxx : [reviews1, reviews2,...]\"\n\n# print(userratings['tt0111161'])\n#\n# print(metaratings['tt0111161'])\n# print(ratings['tt0111161'])\n\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']\n average = score / len(reviews)\n userscore[movieid] = average\n\nprint(userscore)\n\n# Meta ratings is a dictionary in ways like this \"ttxxxxxx : [reviews1, reviews2,...]\"\n\n\n\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\n\nprint(criticsscore)\n\n\n# Question 1: Are critics always more positive than the audience?\n\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\n\n# Displaying results to question 1\nprint(\"Critics overpraise these movies \" + str(counter) + \" times more than normal 
viewers out of \"\n + str(len(criticsscore)) + \" movies in total.\")\nif counter < (len(criticsscore) - counter):\n print(\"Because the critics overpraise less than half of the movies sampled here, the critics are more refrained \"\n \"than the users on IMDb.\")\nelse:\n print(\"Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained \"\n \"than the users on IMDb.\")\n\n# Question 2: Is the IMDB score closer to the users' sentiment? Or the critics.\n\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - (ratings[movieid])/10) > abs(score - (ratings[movieid]/10)):\n useriscloser += 1\n else:\n criticiscloser += 1\n\n# Displaying results to question 2\nprint(\"Critics are more closer to the ratings for \" + str(criticiscloser) +\n \" times, while normal viewers are closer \" + str(useriscloser) + \" times out of \" +\n str(len(criticsscore)) + \" movies in total.\")\n\nif useriscloser > criticiscloser:\n print(\"Because the more movies have users resembling closer to the rating, the critics are less accurate \"\n \"than the users on IMDb.\")\nelse:\n print(\"Because the more movies have critics resembling closer to the rating, the users are less accurate \"\n \"than the users on IMDb.\")", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!env/bin/python3 from app import app from config import config as cfg app.run(debug=True, host=cfg.APP_HOST, port=cfg.APP_PORT)
normal
{ "blob_id": "f97150f60dfb3924cda2c969141d5bfe675725ef", "index": 9150, "step-1": "<mask token>\n", "step-2": "<mask token>\napp.run(debug=True, host=cfg.APP_HOST, port=cfg.APP_PORT)\n", "step-3": "from app import app\nfrom config import config as cfg\napp.run(debug=True, host=cfg.APP_HOST, port=cfg.APP_PORT)\n", "step-4": "#!env/bin/python3\nfrom app import app\nfrom config import config as cfg\napp.run(debug=True, host=cfg.APP_HOST, port=cfg.APP_PORT)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def getmin(a, b, c): if a <= b and a <= c: print(a) elif b <= a and b <= c: print(b) else: print(c) def filtername(name): if len(name) > 3: return name[:3] elif len(name) < 3: return name + " " * (3 - len(name)) return name def filternames(names): re = [] for n in names: if len(n) != 3: re += [filtername(n)] return re def printsort2(x): for i in range(len(x) - 1): for j in range(1 + i, len(x)): if x[i] > x[j]: x[i], x[j] = x[j], x[i] for a in x: print(a, end=" ") def print_hell(inp): if "안녕" in inp: print("Hello")
normal
{ "blob_id": "917241482dc1f234d5fae9c107a5f21b018fe6d4", "index": 9843, "step-1": "<mask token>\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\n<mask token>\n", "step-3": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\n<mask token>\n", "step-4": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + ' ' * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=' ')\n\n\ndef print_hell(inp):\n if '안녕' in inp:\n print('Hello')\n", 
"step-5": "def getmin(a, b, c):\n if a <= b and a <= c:\n print(a)\n elif b <= a and b <= c:\n print(b)\n else:\n print(c)\n\n\ndef filtername(name):\n if len(name) > 3:\n return name[:3]\n elif len(name) < 3:\n return name + \" \" * (3 - len(name))\n return name\n\n\ndef filternames(names):\n re = []\n for n in names:\n if len(n) != 3:\n re += [filtername(n)]\n return re\n\n\ndef printsort2(x):\n for i in range(len(x) - 1):\n for j in range(1 + i, len(x)):\n if x[i] > x[j]:\n x[i], x[j] = x[j], x[i]\n for a in x:\n print(a, end=\" \")\n\n\ndef print_hell(inp):\n if \"안녕\" in inp:\n print(\"Hello\")\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from util import AutomataError from automata import NFA from base import Node from copy import copy, deepcopy from os.path import commonprefix DEBUG = False LAMBDA = u'\u03bb' PHI = u'\u00d8' def copyDeltas(src): out = dict() for k in src: out[k] = dict() for k2 in src[k]: out[k][k2] = copy(src[k][k2]) return out def replaceNode(nfa, old, new): if DEBUG: print('R_Start(%s, %s) ---' % (old, new), nfa) if old in nfa._deltas: for input in nfa._deltas[old]: nfa.addDelta(new, input, nfa._deltas[old][input]) del nfa._deltas[old] if DEBUG: print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa) deltas_temp = copyDeltas(nfa._deltas) for src in deltas_temp: for input in deltas_temp[src]: if old in deltas_temp[src][input]: nfa._deltas[src][input].remove(old) nfa._deltas[src][input].add(new) if DEBUG: print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa) def commonsuffix(seq): def reverse(s): out = '' for c in reversed(s): out += c return out seq = [reverse(i) for i in seq] return reverse(commonprefix(seq)) class NetworkNFA(NFA): def __init__(self, nfa): if type(nfa) is not NFA: raise AutomataError('Can create a NetworkNFA only from an NFA.') if all([len(i) == 1 for i in nfa.charset]): self._charset = copy(nfa._charset) else: self._charset = set(['{%s}' % i for i in nfa._charset]) self._nodes = copy(nfa._nodes) self._deltas = copyDeltas(nfa._deltas) self._start = nfa._start self._terminals = copy(nfa._terminals) def addDelta(self, node, input, dest): if set(input) - (self._charset.union(set('()+*'))): raise AutomataError('%s contains symbols not in charset.' 
% input) if type(node) is Node: if type(dest) is set and all([type(i) is Node for i in dest]): if len(dest): if node in self._deltas: if input in self._deltas[node]: self._deltas[node][input] = self._deltas[node][input].union( dest) else: self._deltas[node][input] = dest else: self._deltas[node] = {input: dest} elif type(dest) is Node: if node in self._deltas: if input in self._deltas[node]: self._deltas[node][input].add(dest) else: self._deltas[node][input] = set([dest]) else: self._deltas[node] = {input: set([dest])} else: raise AutomataError( 'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__) else: raise AutomataError( 'Delta source must be Node, not %s.' % type(node).__name__) def remDelta(self, node, input): if set(input) - (self._charset.union(set('()+*'))): raise AutomataError('%s contains symbols not in charset.' % input) if type(node) is Node: if node in self._deltas and input in self._deltas[node]: self._deltas[node].pop(input) if len(self._deltas[node]) == 0: del self._deltas[node] else: raise AutomataError( 'Delta source must be a Node, not %s' % type(node).__name__) def isValid(self): if len(self._nodes) == 0: return False if self._start not in self._nodes: return False for i in self._terminals: if i not in self._nodes: return False if not set(self._deltas.keys()).issubset(self._nodes): return False for key in self._deltas: for char in self._deltas[key]: if set(char) - (self._charset.union(set('()+*'))): return False return True def apply(self, input, start): raise AutomataError('NetworkNFA does not allow direct application.') def __repr__(self): ret = '<NetworkNFA>\n' ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset)) ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes]) ret += 'Terminals: {%s}\n' % ','.join( [i.label for i in self._terminals]) ret += ' Start: %s\n' % (self._start and self._start.label) ret += ' Delta: ' if len(self._deltas): for qFrom in self._deltas: for input in 
self._deltas[qFrom]: ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join( [i.label for i in self._deltas[qFrom][input]])) ret = ret.rstrip() + '\n' else: ret += 'None\n' ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No') ret += '</NetworkNFA>' return ret def nfa2regex(nfa): if not nfa.isValid(): raise AutomataError( 'NFA must be in a valid state to be converted to a regex.') network = NetworkNFA(nfa) if DEBUG: print('START', network) # Take care of multi-terminals # if len(network.terminals) > 1: ## end = Node('qf') # network.addNode(end) # for i in copy(network.terminals): ## network.addDelta(i, '', end) # network.remTerminal(i) # network.addTerminal(end) # Add a dummy start and end nodes start = Node('qs') network.addNode(start) network.addDelta(start, '', network.start) network.start = start end = Node('qf') network.addNode(end) for i in network.terminals: network.addDelta(i, '', end) network.remTerminal(i) network.addTerminal(end) if DEBUG: print('Dummies added: ', network) # Collapse connections for src in network.nodes: delta_temp = network.getDelta(src) for dest in network.nodes: chars = [] for input in delta_temp: if input and dest in delta_temp[input]: chars.append(input) if len(chars): for c in chars: delta_temp[c].remove(dest) if len(delta_temp[c]) == 0: del delta_temp[c] if len(chars) > 1: chars = '(' + '+'.join(chars) + ')' else: chars = '+'.join(chars) network.addDelta(src, chars, dest) if DEBUG: print('Collapsed: ', network) # Collect pliable nodes pliableNodes = list(network.nodes) pliableNodes.remove(network.start) for n in network.terminals: pliableNodes.remove(n) # Build a distance-from-terminal table nodeFinalDist = {} maxDist = len(network.nodes) ** len(network.nodes) # Lazy for n in network.nodes: nodeFinalDist[n] = maxDist nodeFinalDist[network.terminals[0]] = 0 toProcess = list(network.nodes) toProcess.remove(network.terminals[0]) while len(toProcess): for node in toProcess: dests = 
network.getDelta(node).values() if len(dests) == 0: dests = set([]) else: dests = reduce(set.union, network.getDelta(node).values()) if len(dests) == 0: toProcess.remove(node) else: minDist = min([nodeFinalDist[i] for i in dests]) if minDist != maxDist: nodeFinalDist[node] = minDist + 1 toProcess.remove(node) # Sort pliable nodes by distance from terminal pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True) if DEBUG: print('Pliables: ', pliableNodes) for node in pliableNodes: # Remove Node network.remNode(node) # Save delta delta = copy(network.getDelta(node)) # Convert loops to regex loops = [] for input in delta: if node in delta[input]: if len(input): loops.append(input) loopRegex = '+'.join(loops) if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'): loopRegex = '(' + loopRegex + ')*' elif len(loopRegex) >= 1: loopRegex = loopRegex + '*' # Remove loops for input in copy(delta): if delta[input] == set([node]): del delta[input] elif node in delta[input]: delta[input].remove(node) # Search lambda-closure equivalence if '' in delta and (len(delta) != 1 or len(delta['']) != 1): eligible = [] for dest in delta['']: delta_temp = network.getDelta(dest) if '' in delta_temp and node in delta_temp['']: eligible.append(dest) if len(eligible): replaceNode(network, node, eligible[0]) continue # Remove delta try: del network._deltas[node] except KeyError: # No deltas remaining, had only loops continue if DEBUG: print('Working on connections: ', node, delta) # Check all possible connections through this node deltas_temp = copyDeltas(network._deltas) for src in deltas_temp: for input in deltas_temp[src]: tempDeltaDest = network.getDelta(src)[input] if node in tempDeltaDest: tempDeltaDest.remove(node) if len(tempDeltaDest) == 0: network.remDelta(src, input) for input2 in delta: for dest in delta[input2]: if not (src == dest and (input + loopRegex + input2) == ''): network.addDelta( src, input + loopRegex + input2, dest) if DEBUG: print('New 
Delta:', src, input, loopRegex, input2, dest, network) # Extract common prefix/suffix branches = network.getDelta(network.start).keys() if len(branches) == 1: regex = branches[0] else: prefix = commonprefix(branches) suffix = commonsuffix(branches) branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):] for i in branches] branches.sort(key=len) if len(prefix) or len(suffix): regex = prefix + \ '(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix else: regex = '+'.join([i or LAMBDA for i in branches]) or PHI return regex
normal
{ "blob_id": "2fe20f28fc7bba6b8188f5068e2b3c8b87c15edc", "index": 94, "step-1": "<mask token>\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\n<mask token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: 
%s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\n<mask token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: 
%s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: 
%s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, 
network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n 
branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n", "step-4": "from util import AutomataError\nfrom automata import NFA\nfrom base import Node\nfrom copy import copy, deepcopy\nfrom os.path import commonprefix\nDEBUG = False\nLAMBDA = u'λ'\nPHI = u'Ø'\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s 
contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in 
self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n 
else:\n dests = reduce(set.union, network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n 
suffix = commonsuffix(branches)\n branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n", "step-5": "from util import AutomataError\nfrom automata import NFA\nfrom base import Node\nfrom copy import copy, deepcopy\nfrom os.path import commonprefix\n\nDEBUG = False\n\nLAMBDA = u'\\u03bb'\nPHI = u'\\u00d8'\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n\n if all([len(i) == 1 for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set(['{%s}' % i for i in nfa._charset])\n\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - 
(self._charset.union(set('()+*'))):\n raise AutomataError('%s contains symbols not in charset.' % input)\n\n if type(node) is Node:\n if type(dest) is set and all([type(i) is Node for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][input].union(\n dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)\n else:\n raise AutomataError(\n 'Delta source must be Node, not %s.' % type(node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - (self._charset.union(set('()+*'))):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError(\n 'Delta source must be a Node, not %s' % type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n\n for i in self._terminals:\n if i not in self._nodes:\n return False\n\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - (self._charset.union(set('()+*'))):\n return False\n\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join(\n [i.label for i in self._terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, input or 'lambda', ','.join(\n [i.label for i in self._deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n\n network = NetworkNFA(nfa)\n\n if DEBUG:\n print('START', network)\n\n# Take care of multi-terminals\n# if len(network.terminals) > 1:\n## end = Node('qf')\n# network.addNode(end)\n# for i in copy(network.terminals):\n## network.addDelta(i, '', end)\n# network.remTerminal(i)\n# network.addTerminal(end)\n\n # Add a dummy start and end 
nodes\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n\n # Collapse connections\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n\n # Collect pliable nodes\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n\n # Build a distance-from-terminal table\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes) # Lazy\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n\n # Sort pliable nodes by distance from terminal\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n\n for node in pliableNodes:\n # Remove Node\n network.remNode(node)\n\n # Save delta\n delta = copy(network.getDelta(node))\n\n # Convert 
loops to regex\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n\n # Remove loops\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n\n # Search lambda-closure equivalence\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n\n # Remove delta\n try:\n del network._deltas[node]\n except KeyError: # No deltas remaining, had only loops\n continue\n\n if DEBUG:\n print('Working on connections: ', node, delta)\n # Check all possible connections through this node\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and (input + loopRegex + input2) == ''):\n network.addDelta(\n src, input + loopRegex + input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n\n # Extract common prefix/suffix\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]\n for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + \\\n '(' + '+'.join([i or LAMBDA for i 
in branches]) + ')' + suffix\n else:\n regex = '+'.join([i or LAMBDA for i in branches]) or PHI\n\n return regex\n", "step-ids": [ 8, 9, 11, 13, 14 ] }
[ 8, 9, 11, 13, 14 ]
from conans import * class GlibConan(ConanFile): name = "glib" description = "Common C routines used by Gtk+ and other libs" license = "LGPL" settings = {"os": ["Linux"], "arch": ["x86_64", "armv8"]} build_requires = ( "generators/1.0.0", "autotools/1.0.0", ) requires = ( "glibc/[>=2.31]", "sh/[>=]", ) def source(self): tools.get(f"ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz") def build(self): args = [ "--disable-static", ] autotools = AutoToolsBuildEnvironment(self) autotools.configure(args=args, configure_dir=f"{self.name}-{self.version}") autotools.make() autotools.install()
normal
{ "blob_id": "e49c5c6475a1210a9657d7bbd0490c8d20863718", "index": 2285, "step-1": "<mask token>\n\n\nclass GlibConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-2": "<mask token>\n\n\nclass GlibConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def source(self):\n tools.get(\n f'ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz'\n )\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-3": "<mask token>\n\n\nclass GlibConan(ConanFile):\n name = 'glib'\n description = 'Common C routines used by Gtk+ and other libs'\n license = 'LGPL'\n settings = {'os': ['Linux'], 'arch': ['x86_64', 'armv8']}\n build_requires = 'generators/1.0.0', 'autotools/1.0.0'\n requires = 'glibc/[>=2.31]', 'sh/[>=]'\n\n def source(self):\n tools.get(\n f'ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz'\n )\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-4": "from conans import *\n\n\nclass GlibConan(ConanFile):\n name = 'glib'\n description = 'Common C routines used by Gtk+ and other libs'\n license = 'LGPL'\n settings = {'os': ['Linux'], 'arch': ['x86_64', 'armv8']}\n build_requires = 'generators/1.0.0', 'autotools/1.0.0'\n requires = 'glibc/[>=2.31]', 'sh/[>=]'\n\n def source(self):\n tools.get(\n 
f'ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz'\n )\n\n def build(self):\n args = ['--disable-static']\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=\n f'{self.name}-{self.version}')\n autotools.make()\n autotools.install()\n", "step-5": "from conans import *\n\nclass GlibConan(ConanFile):\n name = \"glib\"\n description = \"Common C routines used by Gtk+ and other libs\"\n license = \"LGPL\"\n settings = {\"os\": [\"Linux\"], \"arch\": [\"x86_64\", \"armv8\"]}\n build_requires = (\n \"generators/1.0.0\",\n \"autotools/1.0.0\",\n )\n requires = (\n \"glibc/[>=2.31]\",\n \"sh/[>=]\",\n )\n\n def source(self):\n tools.get(f\"ftp://ftp.gnome.org/pub/gnome/sources/glib/1.2/glib-{self.version}.tar.gz\")\n\n def build(self):\n args = [\n \"--disable-static\",\n ]\n autotools = AutoToolsBuildEnvironment(self)\n autotools.configure(args=args, configure_dir=f\"{self.name}-{self.version}\")\n autotools.make()\n autotools.install()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
__author__ = 'Or'
normal
{ "blob_id": "54c1b294d826deb43978591cad590c5e969bebd7", "index": 6655, "step-1": "<mask token>\n", "step-2": "__author__ = 'Or'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
""" Card rarity parameters """ from typing import List, Optional from django.db.models.query import Q from cards.models.rarity import Rarity from cardsearch.parameters.base_parameters import ( OPERATOR_MAPPING, OPERATOR_TO_WORDY_MAPPING, CardTextParameter, CardSearchContext, ParameterArgs, QueryContext, QueryValidationError, ) class CardRarityParam(CardTextParameter): """ The parameter for searching by a card's rarity """ @classmethod def get_parameter_name(cls) -> str: return "rarity" @classmethod def get_search_operators(cls) -> List[str]: return [":", "=", "<=", "<", ">", ">="] @classmethod def get_search_keywords(cls) -> List[str]: return ["rarity", "r"] def get_default_search_context(self) -> CardSearchContext: return CardSearchContext.PRINTING def __init__(self, negated: bool, param_args: ParameterArgs): super().__init__(negated, param_args) self.rarity: Optional[Rarity] = None if self.operator == ":": self.operator = "=" def validate(self, query_context: QueryContext) -> None: try: self.rarity = Rarity.objects.get( Q(symbol__iexact=self.value) | Q(name__iexact=self.value) ) except Rarity.DoesNotExist: raise QueryValidationError(f'Couldn\'t find rarity "{self.value}"') def query(self, query_context: QueryContext) -> Q: if self.operator == "=": query = Q(rarity=self.rarity) else: filter_ = f"rarity__display_order{OPERATOR_MAPPING[self.operator]}" query = Q(**{filter_: self.rarity.display_order}) return ~query if self.negated else query def get_pretty_str(self, query_context: QueryContext) -> str: return ( "the rarity " + ("isn't" if self.negated else "is") + ( " " + OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in (":", "=") else "" ) + f" {self.rarity.name.lower()}" )
normal
{ "blob_id": "c7d9bbdff9148c5d928de66f4406ee8b4e1bcdac", "index": 2672, "step-1": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n", "step-2": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n", "step-3": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n\n def 
get_default_search_context(self) ->CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n", "step-4": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n\n @classmethod\n def get_search_operators(cls) ->List[str]:\n return [':', '=', '<=', '<', '>', '>=']\n <mask token>\n\n def get_default_search_context(self) ->CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n\n def query(self, query_context: QueryContext) ->Q:\n if self.operator == '=':\n query = Q(rarity=self.rarity)\n else:\n filter_ = f'rarity__display_order{OPERATOR_MAPPING[self.operator]}'\n query = Q(**{filter_: self.rarity.display_order})\n return ~query if self.negated else query\n\n def get_pretty_str(self, query_context: 
QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n", "step-5": "\"\"\"\nCard rarity parameters\n\"\"\"\nfrom typing import List, Optional\n\nfrom django.db.models.query import Q\n\nfrom cards.models.rarity import Rarity\nfrom cardsearch.parameters.base_parameters import (\n OPERATOR_MAPPING,\n OPERATOR_TO_WORDY_MAPPING,\n CardTextParameter,\n CardSearchContext,\n ParameterArgs,\n QueryContext,\n QueryValidationError,\n)\n\n\nclass CardRarityParam(CardTextParameter):\n \"\"\"\n The parameter for searching by a card's rarity\n \"\"\"\n\n @classmethod\n def get_parameter_name(cls) -> str:\n return \"rarity\"\n\n @classmethod\n def get_search_operators(cls) -> List[str]:\n return [\":\", \"=\", \"<=\", \"<\", \">\", \">=\"]\n\n @classmethod\n def get_search_keywords(cls) -> List[str]:\n return [\"rarity\", \"r\"]\n\n def get_default_search_context(self) -> CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == \":\":\n self.operator = \"=\"\n\n def validate(self, query_context: QueryContext) -> None:\n try:\n self.rarity = Rarity.objects.get(\n Q(symbol__iexact=self.value) | Q(name__iexact=self.value)\n )\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n\n def query(self, query_context: QueryContext) -> Q:\n if self.operator == \"=\":\n query = Q(rarity=self.rarity)\n else:\n filter_ = f\"rarity__display_order{OPERATOR_MAPPING[self.operator]}\"\n query = Q(**{filter_: self.rarity.display_order})\n return ~query if self.negated else query\n\n def get_pretty_str(self, query_context: QueryContext) -> str:\n return (\n \"the rarity \"\n + (\"isn't\" if self.negated else \"is\")\n + (\n \" \" 
+ OPERATOR_TO_WORDY_MAPPING[self.operator]\n if self.operator not in (\":\", \"=\")\n else \"\"\n )\n + f\" {self.rarity.name.lower()}\"\n )\n", "step-ids": [ 4, 5, 6, 8, 12 ] }
[ 4, 5, 6, 8, 12 ]
import datetime class Dato: def __init__(self, id: int, dato: str, tipo: str, fecha: datetime.datetime): self.__id = id self.__dato = dato self.__tipo = tipo self.__fecha = fecha def getId(self): return self.__id def setId(self, id): self.__id = id def getDato(self): return self.__dato def setDato(self, dato): self.__dato = dato def getTipo(self): return self.__tipo def setTipo(self, tipo): self.__tipo = tipo def getFecha(self): return self.__fecha def setFecha(self, fecha): self.__fecha = fecha
normal
{ "blob_id": "95256390e1e7e9227b96dccce33082de9d2cddd3", "index": 5158, "step-1": "<mask token>\n\n\nclass Dato:\n <mask token>\n <mask token>\n\n def setId(self, id):\n self.__id = id\n <mask token>\n\n def setDato(self, dato):\n self.__dato = dato\n <mask token>\n\n def setTipo(self, tipo):\n self.__tipo = tipo\n\n def getFecha(self):\n return self.__fecha\n\n def setFecha(self, fecha):\n self.__fecha = fecha\n", "step-2": "<mask token>\n\n\nclass Dato:\n\n def __init__(self, id: int, dato: str, tipo: str, fecha: datetime.datetime\n ):\n self.__id = id\n self.__dato = dato\n self.__tipo = tipo\n self.__fecha = fecha\n <mask token>\n\n def setId(self, id):\n self.__id = id\n <mask token>\n\n def setDato(self, dato):\n self.__dato = dato\n <mask token>\n\n def setTipo(self, tipo):\n self.__tipo = tipo\n\n def getFecha(self):\n return self.__fecha\n\n def setFecha(self, fecha):\n self.__fecha = fecha\n", "step-3": "<mask token>\n\n\nclass Dato:\n\n def __init__(self, id: int, dato: str, tipo: str, fecha: datetime.datetime\n ):\n self.__id = id\n self.__dato = dato\n self.__tipo = tipo\n self.__fecha = fecha\n\n def getId(self):\n return self.__id\n\n def setId(self, id):\n self.__id = id\n <mask token>\n\n def setDato(self, dato):\n self.__dato = dato\n\n def getTipo(self):\n return self.__tipo\n\n def setTipo(self, tipo):\n self.__tipo = tipo\n\n def getFecha(self):\n return self.__fecha\n\n def setFecha(self, fecha):\n self.__fecha = fecha\n", "step-4": "<mask token>\n\n\nclass Dato:\n\n def __init__(self, id: int, dato: str, tipo: str, fecha: datetime.datetime\n ):\n self.__id = id\n self.__dato = dato\n self.__tipo = tipo\n self.__fecha = fecha\n\n def getId(self):\n return self.__id\n\n def setId(self, id):\n self.__id = id\n\n def getDato(self):\n return self.__dato\n\n def setDato(self, dato):\n self.__dato = dato\n\n def getTipo(self):\n return self.__tipo\n\n def setTipo(self, tipo):\n self.__tipo = tipo\n\n def getFecha(self):\n return self.__fecha\n\n def 
setFecha(self, fecha):\n self.__fecha = fecha\n", "step-5": "import datetime\n\nclass Dato:\n def __init__(self, id: int, dato: str, tipo: str, fecha: datetime.datetime):\n self.__id = id\n self.__dato = dato\n self.__tipo = tipo\n self.__fecha = fecha\n\n def getId(self):\n return self.__id\n\n def setId(self, id):\n self.__id = id\n\n def getDato(self):\n return self.__dato\n\n def setDato(self, dato):\n self.__dato = dato\n\n def getTipo(self):\n return self.__tipo\n\n def setTipo(self, tipo):\n self.__tipo = tipo\n\n def getFecha(self):\n return self.__fecha\n\n def setFecha(self, fecha):\n self.__fecha = fecha", "step-ids": [ 6, 7, 9, 10, 12 ] }
[ 6, 7, 9, 10, 12 ]
import requests def squeezed (client_name): return client_name.replace('Индивидуальный предприниматель', 'ИП') def get_kkm_filled_fn(max_fill=80): ## возвращает список ККМ с заполнением ФН больше max_fill в % LOGIN_URL = 'https://pk.platformaofd.ru/auth/login' API_URL = 'https://pk.platformaofd.ru/api/monitoring' session = requests.Session() print('-= подключение к серверу =-') session.get(LOGIN_URL) login_data = { 'email': '[email protected]', 'password': 'smart620514', 'username': '[email protected]', 'phone':''} print('-= авторизация =-') session.post(LOGIN_URL, data=login_data) # запрос всех ККМ, кроме архивных (headers обязательно !) headers = {'Content-Type': 'application/json;charset=UTF-8'} payload = '{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}' print('-= получение данных с сервера =-') r = session.post (API_URL, data=payload, headers=headers) data_from_api = r.json() all_kkm_list = data_from_api['result']['data'] kkm_quanity = len(all_kkm_list) print('-= обработка данных =-') kkm_with_filled_fn = [] for kkm in all_kkm_list: fn_used = int(kkm['fnSpaceUsed'].strip("'%")) if fn_used >= max_fill: kkm_with_filled_fn.append(kkm) return kkm_with_filled_fn max_fill = 80 x = get_kkm_filled_fn(max_fill) print(f'ККМ с заполненностью ФН выше {max_fill}%.') for k in x: print(f"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}")
normal
{ "blob_id": "cd2e03666a890d6e9ea0fcb45fe28510d684916d", "index": 83, "step-1": "<mask token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение 
данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\n<mask token>\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n", "step-3": "<mask token>\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n", 
"step-4": "import requests\n\n\ndef squeezed(client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\n\ndef get_kkm_filled_fn(max_fill=80):\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n login_data = {'email': '[email protected]', 'password': 'smart620514',\n 'username': '[email protected]', 'phone': ''}\n print('-= авторизация =-')\n session.post(LOGIN_URL, data=login_data)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = (\n '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}')\n print('-= получение данных с сервера =-')\n r = session.post(API_URL, data=payload, headers=headers)\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(\n f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\"\n )\n", "step-5": "import requests\n\ndef squeezed (client_name):\n return client_name.replace('Индивидуальный предприниматель', 'ИП')\n\ndef get_kkm_filled_fn(max_fill=80):\n## возвращает список ККМ с заполнением ФН больше max_fill в %\n LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'\n API_URL = 'https://pk.platformaofd.ru/api/monitoring'\n\n session = requests.Session()\n print('-= подключение к серверу =-')\n session.get(LOGIN_URL)\n\n login_data = {\n 'email': '[email protected]',\n 'password': 'smart620514',\n 'username': '[email protected]',\n 'phone':''}\n\n print('-= авторизация =-')\n 
session.post(LOGIN_URL, data=login_data)\n\n # запрос всех ККМ, кроме архивных (headers обязательно !)\n headers = {'Content-Type': 'application/json;charset=UTF-8'}\n payload = '{\"badgeId\":17,\"type\":\"terminal\",\"filterValues\":[],\"withArchive\":0}'\n print('-= получение данных с сервера =-')\n r = session.post (API_URL, data=payload, headers=headers)\n\n data_from_api = r.json()\n all_kkm_list = data_from_api['result']['data']\n kkm_quanity = len(all_kkm_list)\n\n print('-= обработка данных =-')\n kkm_with_filled_fn = []\n for kkm in all_kkm_list:\n fn_used = int(kkm['fnSpaceUsed'].strip(\"'%\"))\n if fn_used >= max_fill:\n kkm_with_filled_fn.append(kkm)\n return kkm_with_filled_fn\n\n\nmax_fill = 80\nx = get_kkm_filled_fn(max_fill)\nprint(f'ККМ с заполненностью ФН выше {max_fill}%.')\nfor k in x:\n print(f\"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}\")\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# pylint: skip-file from sorter.lib.request_data import read_url from urllib2 import HTTPError class fake_urllib(object): def __init__(self, should_fail=False): self.should_fail = should_fail def urlopen(self, uri): if self.should_fail == True: raise HTTPError('FAKER.GTLD', 404, 'Four Oh Four', None, None) def read(self): return "fake body" def close(self): pass class fake_logger(object): def __init__(self): self.msg = None def info(self, msg, *args): pass def warn(self, msg, *args): self.msg = msg.reason class TestRequestData(object): def test_read_url(self, monkeypatch): monkeypatch.setattr("urllib2.urlopen", lambda foo: fake_urllib()) monkeypatch.setattr("sorter.lib.request_data.LOGGER", fake_logger()) body = read_url("fakeurl") assert body == "fake body" def test_read_url_404(self, monkeypatch): faker = fake_logger() monkeypatch.setattr("sorter.lib.request_data.urllib2", fake_urllib(True)) monkeypatch.setattr("sorter.lib.request_data.LOGGER", faker) body = read_url("fakeurl") assert body == None assert faker.msg == 'Four Oh Four'
normal
{ "blob_id": "2bbfbc597a4e1f8b46f58a4c6002a9943eff557a", "index": 5644, "step-1": "<mask token>\n\n\nclass fake_logger(object):\n\n def __init__(self):\n self.msg = None\n\n def info(self, msg, *args):\n pass\n\n def warn(self, msg, *args):\n self.msg = msg.reason\n\n\nclass TestRequestData(object):\n\n def test_read_url(self, monkeypatch):\n monkeypatch.setattr('urllib2.urlopen', lambda foo: fake_urllib())\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', fake_logger())\n body = read_url('fakeurl')\n assert body == 'fake body'\n\n def test_read_url_404(self, monkeypatch):\n faker = fake_logger()\n monkeypatch.setattr('sorter.lib.request_data.urllib2', fake_urllib(\n True))\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', faker)\n body = read_url('fakeurl')\n assert body == None\n assert faker.msg == 'Four Oh Four'\n", "step-2": "<mask token>\n\n\nclass fake_urllib(object):\n\n def __init__(self, should_fail=False):\n self.should_fail = should_fail\n <mask token>\n <mask token>\n <mask token>\n\n\nclass fake_logger(object):\n\n def __init__(self):\n self.msg = None\n\n def info(self, msg, *args):\n pass\n\n def warn(self, msg, *args):\n self.msg = msg.reason\n\n\nclass TestRequestData(object):\n\n def test_read_url(self, monkeypatch):\n monkeypatch.setattr('urllib2.urlopen', lambda foo: fake_urllib())\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', fake_logger())\n body = read_url('fakeurl')\n assert body == 'fake body'\n\n def test_read_url_404(self, monkeypatch):\n faker = fake_logger()\n monkeypatch.setattr('sorter.lib.request_data.urllib2', fake_urllib(\n True))\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', faker)\n body = read_url('fakeurl')\n assert body == None\n assert faker.msg == 'Four Oh Four'\n", "step-3": "<mask token>\n\n\nclass fake_urllib(object):\n\n def __init__(self, should_fail=False):\n self.should_fail = should_fail\n\n def urlopen(self, uri):\n if self.should_fail == True:\n raise HTTPError('FAKER.GTLD', 404, 
'Four Oh Four', None, None)\n <mask token>\n <mask token>\n\n\nclass fake_logger(object):\n\n def __init__(self):\n self.msg = None\n\n def info(self, msg, *args):\n pass\n\n def warn(self, msg, *args):\n self.msg = msg.reason\n\n\nclass TestRequestData(object):\n\n def test_read_url(self, monkeypatch):\n monkeypatch.setattr('urllib2.urlopen', lambda foo: fake_urllib())\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', fake_logger())\n body = read_url('fakeurl')\n assert body == 'fake body'\n\n def test_read_url_404(self, monkeypatch):\n faker = fake_logger()\n monkeypatch.setattr('sorter.lib.request_data.urllib2', fake_urllib(\n True))\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', faker)\n body = read_url('fakeurl')\n assert body == None\n assert faker.msg == 'Four Oh Four'\n", "step-4": "<mask token>\n\n\nclass fake_urllib(object):\n\n def __init__(self, should_fail=False):\n self.should_fail = should_fail\n\n def urlopen(self, uri):\n if self.should_fail == True:\n raise HTTPError('FAKER.GTLD', 404, 'Four Oh Four', None, None)\n\n def read(self):\n return 'fake body'\n\n def close(self):\n pass\n\n\nclass fake_logger(object):\n\n def __init__(self):\n self.msg = None\n\n def info(self, msg, *args):\n pass\n\n def warn(self, msg, *args):\n self.msg = msg.reason\n\n\nclass TestRequestData(object):\n\n def test_read_url(self, monkeypatch):\n monkeypatch.setattr('urllib2.urlopen', lambda foo: fake_urllib())\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', fake_logger())\n body = read_url('fakeurl')\n assert body == 'fake body'\n\n def test_read_url_404(self, monkeypatch):\n faker = fake_logger()\n monkeypatch.setattr('sorter.lib.request_data.urllib2', fake_urllib(\n True))\n monkeypatch.setattr('sorter.lib.request_data.LOGGER', faker)\n body = read_url('fakeurl')\n assert body == None\n assert faker.msg == 'Four Oh Four'\n", "step-5": "# pylint: skip-file\nfrom sorter.lib.request_data import read_url\nfrom urllib2 import HTTPError\n\nclass 
fake_urllib(object):\n def __init__(self, should_fail=False):\n self.should_fail = should_fail\n\n def urlopen(self, uri):\n if self.should_fail == True:\n raise HTTPError('FAKER.GTLD', 404, 'Four Oh Four', None, None)\n\n def read(self):\n return \"fake body\"\n\n def close(self):\n pass\n\nclass fake_logger(object):\n def __init__(self):\n self.msg = None\n\n def info(self, msg, *args):\n pass\n\n def warn(self, msg, *args):\n self.msg = msg.reason\n\nclass TestRequestData(object):\n\n def test_read_url(self, monkeypatch):\n monkeypatch.setattr(\"urllib2.urlopen\", lambda foo: fake_urllib())\n monkeypatch.setattr(\"sorter.lib.request_data.LOGGER\", fake_logger())\n\n body = read_url(\"fakeurl\")\n\n assert body == \"fake body\"\n\n def test_read_url_404(self, monkeypatch):\n faker = fake_logger()\n monkeypatch.setattr(\"sorter.lib.request_data.urllib2\", fake_urllib(True))\n monkeypatch.setattr(\"sorter.lib.request_data.LOGGER\", faker)\n\n body = read_url(\"fakeurl\")\n \n assert body == None\n assert faker.msg == 'Four Oh Four'\n", "step-ids": [ 7, 9, 10, 12, 14 ] }
[ 7, 9, 10, 12, 14 ]
def patternCount(dnaText, pattern): count = 0 for i in range(0, len(dnaText) - len(pattern)): word = dnaText[i:i+len(pattern)] if (word == pattern): count = count + 1 return count def freqWordProblem(text, k): countWords = [] for i in range(0, len(text) - k): pattern = text[i:i+k] countWords.append(patternCount(text, pattern)) maxCount = 0 indexes = [] for j in range(0, len(countWords)): count = countWords[j] if (count == maxCount): indexes.append(j) elif (count > maxCount): indexes = [j] maxCount = count result = set() for index in indexes: result.add(text[index:index+k]) return list(result) mapDNA = { "A": "T", "G": "C", "T": "A", "C": "G" } def complimentDNA(text): result = "" for letter in text: result = result + mapDNA[letter] return result[::-1] def patternFind(text, pattern): index = [] for i in range(0,len(text)-len(pattern)): word = text[i:i+len(pattern)] if word == pattern: index.append(i) return index for word in patternFind("AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGCGCCACGGGCGCCACTCGCGCCACTTCGCG
CCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACCGCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGCGCCACGCGCCACTGCGCCACCGCGCCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGCCACAGAGCGCCACCTGCGCCACGCGCCAC
TCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCCCTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCACGCGCCACGCGCCACGGCGCCACAATGCGC
CACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCACTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCATGGCGCCACCTGGGTCCATGCGCGCCACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCACAAAAGGCGCCACAGGGAGGTTCTATCCG
CGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACATGCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAGAAAAGCGCCACAGAGCGCCACAAATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCACTTCAGCGCCACGAATGCGCCACGCGCCA
CTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC","GCGCCACGC"): print(word)
normal
{ "blob_id": "29c1a989365408bf5c3d6196f7afc969be63df85", "index": 5942, "step-1": "<mask token>\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\n<mask token>\n", "step-2": "def patternCount(dnaText, pattern):\n count = 0\n for i in range(0, len(dnaText) - len(pattern)):\n word = dnaText[i:i + len(pattern)]\n if word == pattern:\n count = count + 1\n return count\n\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i + k]\n countWords.append(patternCount(text, pattern))\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if count == maxCount:\n indexes.append(j)\n elif count > maxCount:\n indexes = [j]\n maxCount = count\n result = set()\n for index in indexes:\n result.add(text[index:index + k])\n return list(result)\n\n\n<mask token>\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\n<mask token>\n", "step-3": "def patternCount(dnaText, pattern):\n count = 0\n for i in range(0, len(dnaText) - len(pattern)):\n word = dnaText[i:i + len(pattern)]\n if word == pattern:\n count = count + 1\n return count\n\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i + k]\n countWords.append(patternCount(text, pattern))\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if count == maxCount:\n indexes.append(j)\n elif count > maxCount:\n indexes = [j]\n maxCount = 
count\n result = set()\n for index in indexes:\n result.add(text[index:index + k])\n return list(result)\n\n\n<mask token>\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\nfor word in patternFind(\n 'AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGCGCCACGGGCGCCACTCGCGCCACTTCGCGCCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACC
GCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGCGCCACGCGCCACTGCGCCACCGCGCCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGCCACAGAGCGCCACCTGCGCCACGCGCCACTCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCC
CTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCACGCGCCACGCGCCACGGCGCCACAATGCGCCACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCA
CTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCATGGCGCCACCTGGGTCCATGCGCGCCACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCACAAAAGGCGCCACAGGGAGGTTCTATCCGCGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACAT
GCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAGAAAAGCGCCACAGAGCGCCACAAATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCACTTCAGCGCCACGAATGCGCCACGCGCCACTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC'\n , 'GCGCCACGC'):\n print(word)\n", "step-4": "def patternCount(dnaText, pattern):\n count = 0\n for i in range(0, 
len(dnaText) - len(pattern)):\n word = dnaText[i:i + len(pattern)]\n if word == pattern:\n count = count + 1\n return count\n\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i + k]\n countWords.append(patternCount(text, pattern))\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if count == maxCount:\n indexes.append(j)\n elif count > maxCount:\n indexes = [j]\n maxCount = count\n result = set()\n for index in indexes:\n result.add(text[index:index + k])\n return list(result)\n\n\nmapDNA = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\nfor word in patternFind(\n 'AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGC
GCCACGGGCGCCACTCGCGCCACTTCGCGCCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACCGCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGCGCCACGCGCCACTGCGCCACCGCGCCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGC
CACAGAGCGCCACCTGCGCCACGCGCCACTCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCCCTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCAC
GCGCCACGCGCCACGGCGCCACAATGCGCCACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCACTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCATGGCGCCACCTGGGTCCATGCGCGCCACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCA
CAAAAGGCGCCACAGGGAGGTTCTATCCGCGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACATGCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAGAAAAGCGCCACAGAGCGCCACAAATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCA
CTTCAGCGCCACGAATGCGCCACGCGCCACTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC'\n , 'GCGCCACGC'):\n print(word)\n", "step-5": "\ndef patternCount(dnaText, pattern):\n count = 0\n for i in range(0, len(dnaText) - len(pattern)):\n word = dnaText[i:i+len(pattern)]\n if (word == pattern):\n count = count + 1\n return count\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i+k]\n countWords.append(patternCount(text, pattern))\n\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if (count == maxCount):\n indexes.append(j)\n elif (count > maxCount):\n indexes = [j]\n maxCount = count\n\n result = set()\n for index in indexes:\n result.add(text[index:index+k])\n\n return list(result)\n\nmapDNA = {\n \"A\": \"T\",\n \"G\": \"C\",\n \"T\": \"A\",\n \"C\": \"G\"\n}\ndef complimentDNA(text):\n result = \"\"\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0,len(text)-len(pattern)):\n word = text[i:i+len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\nfor word in 
patternFind(\"AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGCGCCACGGGCGCCACTCGCGCCACTTCGCGCCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACCGCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGCGCCACGCGCCA
CTGCGCCACCGCGCCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGCCACAGAGCGCCACCTGCGCCACGCGCCACTCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCCCTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAGCGCCACGCGCC
ACGCGCCACGCGCCACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCACGCGCCACGCGCCACGGCGCCACAATGCGCCACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCACTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCATGGCGCCACCTG
GGTCCATGCGCGCCACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCACAAAAGGCGCCACAGGGAGGTTCTATCCGCGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACATGCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAGAAAAGCGCCA
CAGAGCGCCACAAATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCACTTCAGCGCCACGAATGCGCCACGCGCCACTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC\",\"GCGCCACGC\"):\n print(word)\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
#encoding:utf-8 x="There are %d types of peopel."%10 #定义字符串变量x,将10以%d方式输出 binary="binary" do_not="don't" #定义字符串变量binary和do_not y="Those who know %s and those who %s."%(binary,do_not) #使用binary和do_not定义字符串变量y print x print y #打印以上两个变量 print "I said:%r"%x print "I also said:%r."%y #用%r的格式输出以上两个变量 hilarious=False joke_evaluation="Isn't that joke funny?!%r" #定义两个变量hilarious和joke_evaluation print joke_evaluation%hilarious #把变量joke_evaluation中的格式化字符用hilarious打印出 w="This is the left side of ..." a="a string with the right side." #定义字符串变量w和a print w+a #使用加号连接w和a联合输出 #因为+作为操作符,可以将两个字符串变量连接后输出
normal
{ "blob_id": "c2ba60a321eff63f6321831093d7254f6939549b", "index": 9040, "step-1": "#encoding:utf-8\nx=\"There are %d types of peopel.\"%10\n#定义字符串变量x,将10以%d方式输出\nbinary=\"binary\"\ndo_not=\"don't\"\n#定义字符串变量binary和do_not\ny=\"Those who know %s and those who %s.\"%(binary,do_not)\n#使用binary和do_not定义字符串变量y\n\nprint x\nprint y\n#打印以上两个变量\n\nprint \"I said:%r\"%x\nprint \"I also said:%r.\"%y\n#用%r的格式输出以上两个变量\n\nhilarious=False\njoke_evaluation=\"Isn't that joke funny?!%r\"\n#定义两个变量hilarious和joke_evaluation\n\nprint joke_evaluation%hilarious\n#把变量joke_evaluation中的格式化字符用hilarious打印出\n\nw=\"This is the left side of ...\"\na=\"a string with the right side.\"\n#定义字符串变量w和a\n\nprint w+a\n#使用加号连接w和a联合输出\n#因为+作为操作符,可以将两个字符串变量连接后输出", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
def interseccao_chaves(lis_dic): lista = [] for dic1 in lis_dic[0]: for cahves in dic1: lista.append(dic1) for dic2 in lis_dic[1]: for cahves in dic2: lista.append(dic2) return lista
normal
{ "blob_id": "f3ff453655d7938cb417ce212f3836fabafaea43", "index": 1696, "step-1": "<mask token>\n", "step-2": "def interseccao_chaves(lis_dic):\n lista = []\n for dic1 in lis_dic[0]:\n for cahves in dic1:\n lista.append(dic1)\n for dic2 in lis_dic[1]:\n for cahves in dic2:\n lista.append(dic2)\n return lista\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from home import views from order import views as OV urlpatterns = [ path('user', include('user.urls')), path('order', include('order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path('product',include('product.urls')), path('',include('home.urls')),# '' - bu home path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls), path('ckeditor', include('ckeditor_uploader.urls')), path('about/', views.about, name='about'), path('contact/', views.contact, name='about'), path('search/', views.search,name='search'), path('search_auto', views.search_auto, name='search_auto'), path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'), path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'), path('lic/',views.lic,name='lic'), path('post/',views.post,name='post'), path('post/<int:id>/',views.post_detail, name='post_detail'), path('lic/<int:id>/',views.lic_detail, name='lic_detail'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
normal
{ "blob_id": "97cc29e0d54e5d5e05dff16c92ecc4046363185f", "index": 344, "step-1": "<mask token>\n", "step-2": "<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "step-3": "<mask token>\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "step-4": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, 
name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "step-5": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\n\nurlpatterns = [\n path('user', include('user.urls')),\n path('order', include('order.urls')),\n path('shopcart/', OV.shopcart, name='shopcart'),\n path('product',include('product.urls')),\n path('',include('home.urls')),# '' - bu home\n path('faq/', views.faq, name='faq'),\n path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')),\n path('about/', views.about, name='about'),\n path('contact/', views.contact, name='about'),\n path('search/', views.search,name='search'),\n path('search_auto', views.search_auto, name='search_auto'),\n path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'),\n path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'),\n path('lic/',views.lic,name='lic'),\n path('post/',views.post,name='post'),\n path('post/<int:id>/',views.post_detail, name='post_detail'),\n path('lic/<int:id>/',views.lic_detail, name='lic_detail'),\n\n\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python3 import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2' os.environ['KERAS_BACKEND'] = 'tensorflow' import numpy as np import sys from util import load_model from keras.preprocessing.text import hashing_trick from keras.preprocessing.sequence import pad_sequences from southpark.southpark_generative import string_one_hot, char_one_hot MODEL_NAME = "script_gen_demo_model" def main(): print("Loading model...") model, charset = load_model(MODEL_NAME) print(charset) seed_text = input("Enter a String: ").strip() print() generate_script(seed_text, model, charset) def generate_script(seed_text, model, charset): sys.stdout.write(seed_text) sys.stdout.flush() next_char = None should_stop = False while not should_stop: prev_char = next_char next_char = sample(model, seed_text, charset, temp = 0.2) sys.stdout.write(next_char) sys.stdout.flush() if prev_char == '\n' and prev_char == next_char: should_stop = True def sample(model, string, charset, temp = 1.0): inputs = [string_one_hot(string, charset)] inputs = pad_sequences(inputs, padding = 'post', maxlen = 64) preds = model.predict(inputs)[0] return charset[sample_preds(preds, temp)] def sample_preds(results, temperature = 1.0): # helper function to sample an index from a probability array if temperature <= 0.0: return np.argmax(results) #num_choices = results.shape[0] # (batch, outputs) probs = np.exp(np.log(results) / temperature) probs /= np.sum(probs) return np.random.choice(len(results), p = probs) #preds = np.asarray(preds).astype('float64') #preds = np.log(preds) / temperature #exp_preds = np.exp(preds) #preds = exp_preds / np.sum(exp_preds) #probas = np.random.multinomial(1, preds, 1) # #print(probas) #return np.argmax(probas) if __name__ == "__main__": main()
normal
{ "blob_id": "ed7b29a4d7f3a48884434373418c3528f2f397ac", "index": 271, "step-1": "<mask token>\n\n\ndef main():\n print('Loading model...')\n model, charset = load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n print('Loading model...')\n model, charset = load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, 
temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['KERAS_BACKEND'] = 'tensorflow'\n<mask token>\nMODEL_NAME = 'script_gen_demo_model'\n\n\ndef main():\n print('Loading model...')\n model, charset = load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nimport numpy as np\nimport sys\nfrom util import load_model\nfrom keras.preprocessing.text import hashing_trick\nfrom keras.preprocessing.sequence import pad_sequences\nfrom southpark.southpark_generative import string_one_hot, char_one_hot\nMODEL_NAME = 'script_gen_demo_model'\n\n\ndef main():\n print('Loading model...')\n model, charset = 
load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/python3\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'\nos.environ['KERAS_BACKEND'] = 'tensorflow'\n\nimport numpy as np\nimport sys\n\nfrom util import load_model\nfrom keras.preprocessing.text import hashing_trick\nfrom keras.preprocessing.sequence import pad_sequences \n\nfrom southpark.southpark_generative import string_one_hot, char_one_hot\n\n\nMODEL_NAME = \"script_gen_demo_model\"\n\ndef main():\n print(\"Loading model...\") \n model, charset = load_model(MODEL_NAME)\n \n print(charset)\n\n seed_text = input(\"Enter a String: \").strip()\n print()\n generate_script(seed_text, model, charset)\n\ndef generate_script(seed_text, model, charset):\n \n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp = 0.2)\n \n sys.stdout.write(next_char)\n sys.stdout.flush()\n \n 
if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n \ndef sample(model, string, charset, temp = 1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding = 'post', maxlen = 64)\n preds = model.predict(inputs)[0]\n \n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature = 1.0):\n # helper function to sample an index from a probability array\n\n if temperature <= 0.0:\n return np.argmax(results)\n \n #num_choices = results.shape[0] # (batch, outputs)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p = probs)\n\n\n #preds = np.asarray(preds).astype('float64')\n #preds = np.log(preds) / temperature\n #exp_preds = np.exp(preds)\n #preds = exp_preds / np.sum(exp_preds)\n #probas = np.random.multinomial(1, preds, 1)\n #\n #print(probas)\n\n #return np.argmax(probas)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
import os import random import pygame # Class for all the game's obstacles class Obstacle(pygame.sprite.Sprite): # Class constructor def __init__(self, game_params, game_speed): self.obs_type = random.randrange(0, 3) # Becomes a pterodactyl obstacle if (self.obs_type == 0): self.create_pterodactyl(game_params) # Becomes large cacti obstacle elif (self.obs_type == 1): self.create_lg_cacti(game_params) # Becomes small cacti obstacle else: self.create_sm_cacti(game_params) # Gets the sprites and rect of the obstacle pygame.sprite.Sprite.__init__(self, self.containers) self.sprites = self.load_sprites() self.rect = self.sprites[0].get_rect() self.sprite_idx = random.randrange(0, self.sprite_num) self.image = self.sprites[self.sprite_idx] self.counter = 0 # Sets the obstacle's position and movement self.rect.bottom = self.y_pos self.rect.left = game_params['scr_width'] self.speed = game_speed self.movement = [-self.speed, 0] # To detect if dino succesfully avoids an obstacle self.reward_rect = pygame.Rect((game_params['scr_width'], # left 0, # top self.width, # width game_params['scr_height'])) # height self.avoided = False self.min_gap_coeff = game_params['min_gap_coeff'] self.max_gap_coeff = game_params['max_gap_coeff'] # To determine when to create a new obstacle self.min_gap = round(self.width * game_speed + self.gap * self.min_gap_coeff) self.max_gap = round(self.min_gap * self.max_gap_coeff) # Creates a pterodactyl using the parameters in game_params def create_pterodactyl(self, game_params): idx = random.randrange(0, len(game_params['pter_y_pos'])) self.y_pos = game_params['pter_y_pos'][idx] self.width = game_params['pter_width'] self.height = game_params['pter_height'] self.gap = game_params['pter_gap'] self.sprite_num = 2 self.sprite_move = True self.img_name = game_params['pter_img'] # Creates large cacti using the parameters in game_params def create_lg_cacti(self, game_params): length = random.randrange(1, game_params['max_cacti_length']+1) self.y_pos = 
game_params['ground_pos'] self.width = length * game_params['lg_cacti_width'] self.height = game_params['lg_cacti_height'] self.gap = game_params['lg_cacti_gap'] self.sprite_num = 6 / length self.sprite_move = False self.img_name = game_params['lg_cacti_img'] # Creates small cacti using the parameters in game_params def create_sm_cacti(self, game_params): length = random.randrange(1, game_params['max_cacti_length']+1) self.y_pos = game_params['ground_pos'] self.width = length * game_params['sm_cacti_width'] self.height = game_params['sm_cacti_height'] self.gap = game_params['sm_cacti_gap'] self.sprite_num = 6 / length self.sprite_move = False self.img_name = game_params['sm_cacti_img'] # Returns a list of images corresponding to this # obstacle's sprites. def load_sprites(self): # Loads the sprite sheet path = os.path.join('game_classes/sprites', self.img_name) sheet = pygame.image.load(path).convert() sheet_rect = sheet.get_rect() # Gets the original dimensions for each sprite size_x = sheet_rect.width/self.sprite_num size_y = sheet_rect.height sprites = [] # Loops through all sprites in the sprite sheet # and appends them to the sprites list for i in range(int(self.sprite_num)): rect = pygame.Rect((i*size_x, 0, size_x, size_y)) image = pygame.Surface(rect.size).convert() image.blit(sheet, (0, 0), rect) colorkey = image.get_at((0, 0)) image.set_colorkey(colorkey, pygame.RLEACCEL) image = pygame.transform.scale(image, (self.width, self.height)) sprites.append(image) return sprites # Update's the min and max gaps between this obstacle and a new # obstacle based on this obstacle's speed def update_gaps(self): self.min_gap = round(self.rect.width * self.speed + self.gap * self.min_gap_coeff) self.max_gap = round(self.min_gap * self.max_gap_coeff) # Draws the obstacle on the screen def draw(self, screen): screen.blit(self.image, self.rect) # Updates the obstacle's speed, position, and sprite def update(self, game_speed): # updates the obstacle's speed self.speed = 
game_speed self.movement[0] = -self.speed # Updates this obstacles sprites if self.counter % 10 == 0 and self.sprite_move: self.sprite_idx = (self.sprite_idx+1) % self.sprite_num self.image = self.sprites[self.sprite_idx] self.counter += 1 # Updates the obstacle's position self.rect = self.rect.move(self.movement) self.reward_rect = self.reward_rect.move(self.movement) self.update_gaps() # Removes obstacle from screen if it moves beyond screen if self.rect.right < 0: self.kill()
normal
{ "blob_id": "09dac7bfe98a15b3e79edcb0d0a53c0ab4d771ca", "index": 7053, "step-1": "<mask token>\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n <mask token>\n <mask token>\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n 
image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n if self.rect.right < 0:\n self.kill()\n", "step-2": "<mask token>\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def 
create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n <mask token>\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n 
self.update_gaps()\n if self.rect.right < 0:\n self.kill()\n", "step-3": "<mask token>\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n\n def create_lg_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['lg_cacti_width']\n self.height = game_params['lg_cacti_height']\n self.gap = game_params['lg_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['lg_cacti_img']\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, 
game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n if self.rect.right < 0:\n self.kill()\n", "step-4": "import os\nimport random\nimport pygame\n\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n if self.obs_type == 0:\n self.create_pterodactyl(game_params)\n elif self.obs_type == 1:\n self.create_lg_cacti(game_params)\n else:\n self.create_sm_cacti(game_params)\n 
pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = self.sprites[self.sprite_idx]\n self.counter = 0\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n self.reward_rect = pygame.Rect((game_params['scr_width'], 0, self.\n width, game_params['scr_height']))\n self.avoided = False\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n self.min_gap = round(self.width * game_speed + self.gap * self.\n min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n\n def create_lg_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['lg_cacti_width']\n self.height = game_params['lg_cacti_height']\n self.gap = game_params['lg_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['lg_cacti_img']\n\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length'] + 1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n def load_sprites(self):\n path = os.path.join('game_classes/sprites', 
self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n size_x = sheet_rect.width / self.sprite_num\n size_y = sheet_rect.height\n sprites = []\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i * size_x, 0, size_x, size_y))\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n return sprites\n\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed + self.gap * self\n .min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, game_speed):\n self.speed = game_speed\n self.movement[0] = -self.speed\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx + 1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n if self.rect.right < 0:\n self.kill()\n", "step-5": "import os\nimport random\nimport pygame\n\n\n# Class for all the game's obstacles\nclass Obstacle(pygame.sprite.Sprite):\n # Class constructor\n def __init__(self, game_params, game_speed):\n self.obs_type = random.randrange(0, 3)\n # Becomes a pterodactyl obstacle\n if (self.obs_type == 0):\n self.create_pterodactyl(game_params)\n # Becomes large cacti obstacle\n elif (self.obs_type == 1):\n self.create_lg_cacti(game_params)\n # Becomes small cacti obstacle\n else:\n self.create_sm_cacti(game_params)\n\n # Gets the sprites and rect of the obstacle\n pygame.sprite.Sprite.__init__(self, self.containers)\n self.sprites = self.load_sprites()\n self.rect = self.sprites[0].get_rect()\n self.sprite_idx = random.randrange(0, self.sprite_num)\n self.image = 
self.sprites[self.sprite_idx]\n self.counter = 0\n\n # Sets the obstacle's position and movement\n self.rect.bottom = self.y_pos\n self.rect.left = game_params['scr_width']\n self.speed = game_speed\n self.movement = [-self.speed, 0]\n\n # To detect if dino succesfully avoids an obstacle\n self.reward_rect = pygame.Rect((game_params['scr_width'], # left\n 0, # top\n self.width, # width\n game_params['scr_height'])) # height\n self.avoided = False\n\n self.min_gap_coeff = game_params['min_gap_coeff']\n self.max_gap_coeff = game_params['max_gap_coeff']\n\n # To determine when to create a new obstacle\n self.min_gap = round(self.width * game_speed\n + self.gap * self.min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n # Creates a pterodactyl using the parameters in game_params\n def create_pterodactyl(self, game_params):\n idx = random.randrange(0, len(game_params['pter_y_pos']))\n self.y_pos = game_params['pter_y_pos'][idx]\n self.width = game_params['pter_width']\n self.height = game_params['pter_height']\n self.gap = game_params['pter_gap']\n self.sprite_num = 2\n self.sprite_move = True\n self.img_name = game_params['pter_img']\n\n # Creates large cacti using the parameters in game_params\n def create_lg_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length']+1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['lg_cacti_width']\n self.height = game_params['lg_cacti_height']\n self.gap = game_params['lg_cacti_gap']\n self.sprite_num = 6 / length\n self.sprite_move = False\n self.img_name = game_params['lg_cacti_img']\n\n # Creates small cacti using the parameters in game_params\n def create_sm_cacti(self, game_params):\n length = random.randrange(1, game_params['max_cacti_length']+1)\n self.y_pos = game_params['ground_pos']\n self.width = length * game_params['sm_cacti_width']\n self.height = game_params['sm_cacti_height']\n self.gap = game_params['sm_cacti_gap']\n self.sprite_num 
= 6 / length\n self.sprite_move = False\n self.img_name = game_params['sm_cacti_img']\n\n # Returns a list of images corresponding to this\n # obstacle's sprites.\n def load_sprites(self):\n # Loads the sprite sheet\n path = os.path.join('game_classes/sprites', self.img_name)\n sheet = pygame.image.load(path).convert()\n sheet_rect = sheet.get_rect()\n\n # Gets the original dimensions for each sprite\n size_x = sheet_rect.width/self.sprite_num\n size_y = sheet_rect.height\n\n sprites = []\n\n # Loops through all sprites in the sprite sheet\n # and appends them to the sprites list\n for i in range(int(self.sprite_num)):\n rect = pygame.Rect((i*size_x, 0, size_x, size_y))\n\n image = pygame.Surface(rect.size).convert()\n image.blit(sheet, (0, 0), rect)\n\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pygame.RLEACCEL)\n\n image = pygame.transform.scale(image, (self.width, self.height))\n sprites.append(image)\n\n return sprites\n\n # Update's the min and max gaps between this obstacle and a new\n # obstacle based on this obstacle's speed\n def update_gaps(self):\n self.min_gap = round(self.rect.width * self.speed\n + self.gap * self.min_gap_coeff)\n self.max_gap = round(self.min_gap * self.max_gap_coeff)\n\n # Draws the obstacle on the screen\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n # Updates the obstacle's speed, position, and sprite\n def update(self, game_speed):\n # updates the obstacle's speed\n self.speed = game_speed\n self.movement[0] = -self.speed\n\n # Updates this obstacles sprites\n if self.counter % 10 == 0 and self.sprite_move:\n self.sprite_idx = (self.sprite_idx+1) % self.sprite_num\n self.image = self.sprites[self.sprite_idx]\n self.counter += 1\n\n # Updates the obstacle's position\n self.rect = self.rect.move(self.movement)\n self.reward_rect = self.reward_rect.move(self.movement)\n self.update_gaps()\n\n # Removes obstacle from screen if it moves beyond screen\n if self.rect.right < 0:\n self.kill()\n", 
"step-ids": [ 7, 8, 9, 10, 11 ] }
[ 7, 8, 9, 10, 11 ]
from typing import List from fastapi import Depends, APIRouter from sqlalchemy.orm import Session from attendance.database import get_db from attendance import schemas from attendance.models import User from attendance import crud from attendance.dependency import get_current_user router = APIRouter() #BASE_SALARY #hr @router.get("/salary/{user_id}", status_code=200) def read_base_salary(user_id: int, db: Session = Depends(get_db), current_user: User=Depends(get_current_user)): return crud.get_base_salarys(db, user_id=user_id, current=current_user) @router.post("/salary", status_code=201) def create_base_salary(salary: schemas.BaseSalaryCreate, db: Session = Depends(get_db), current_user: User=Depends(get_current_user)): return crud.create_base_salary(db, base_salary=salary, current=current_user)
normal
{ "blob_id": "f10e20d5c409930d697c36d1897ebcb648511e27", "index": 3694, "step-1": "<mask token>\n\n\[email protected]('/salary/{user_id}', status_code=200)\ndef read_base_salary(user_id: int, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user)):\n return crud.get_base_salarys(db, user_id=user_id, current=current_user)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/salary/{user_id}', status_code=200)\ndef read_base_salary(user_id: int, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user)):\n return crud.get_base_salarys(db, user_id=user_id, current=current_user)\n\n\[email protected]('/salary', status_code=201)\ndef create_base_salary(salary: schemas.BaseSalaryCreate, db: Session=\n Depends(get_db), current_user: User=Depends(get_current_user)):\n return crud.create_base_salary(db, base_salary=salary, current=current_user\n )\n", "step-3": "<mask token>\nrouter = APIRouter()\n\n\[email protected]('/salary/{user_id}', status_code=200)\ndef read_base_salary(user_id: int, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user)):\n return crud.get_base_salarys(db, user_id=user_id, current=current_user)\n\n\[email protected]('/salary', status_code=201)\ndef create_base_salary(salary: schemas.BaseSalaryCreate, db: Session=\n Depends(get_db), current_user: User=Depends(get_current_user)):\n return crud.create_base_salary(db, base_salary=salary, current=current_user\n )\n", "step-4": "from typing import List\nfrom fastapi import Depends, APIRouter\nfrom sqlalchemy.orm import Session\nfrom attendance.database import get_db\nfrom attendance import schemas\nfrom attendance.models import User\nfrom attendance import crud\nfrom attendance.dependency import get_current_user\nrouter = APIRouter()\n\n\[email protected]('/salary/{user_id}', status_code=200)\ndef read_base_salary(user_id: int, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user)):\n return 
crud.get_base_salarys(db, user_id=user_id, current=current_user)\n\n\[email protected]('/salary', status_code=201)\ndef create_base_salary(salary: schemas.BaseSalaryCreate, db: Session=\n Depends(get_db), current_user: User=Depends(get_current_user)):\n return crud.create_base_salary(db, base_salary=salary, current=current_user\n )\n", "step-5": "from typing import List\n\nfrom fastapi import Depends, APIRouter\nfrom sqlalchemy.orm import Session\n\nfrom attendance.database import get_db\nfrom attendance import schemas\nfrom attendance.models import User\nfrom attendance import crud\nfrom attendance.dependency import get_current_user\n\nrouter = APIRouter()\n#BASE_SALARY\n#hr\[email protected](\"/salary/{user_id}\", status_code=200)\ndef read_base_salary(user_id: int, db: Session = Depends(get_db), current_user: User=Depends(get_current_user)):\n return crud.get_base_salarys(db, user_id=user_id, current=current_user)\n\[email protected](\"/salary\", status_code=201)\ndef create_base_salary(salary: schemas.BaseSalaryCreate, db: Session = Depends(get_db), current_user: User=Depends(get_current_user)):\n return crud.create_base_salary(db, base_salary=salary, current=current_user)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import pandas as pd triples = pd.read_csv("SollTripel.csv", sep=",", skip_blank_lines=True, skipinitialspace=True) triples.columns = ["triple", "found"] triples = triples["#" not in triples.triple] print(triples)
normal
{ "blob_id": "97afa67cbe20900e2388994481abebe772e22818", "index": 5301, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(triples)\n", "step-3": "<mask token>\ntriples = pd.read_csv('SollTripel.csv', sep=',', skip_blank_lines=True,\n skipinitialspace=True)\ntriples.columns = ['triple', 'found']\ntriples = triples['#' not in triples.triple]\nprint(triples)\n", "step-4": "import pandas as pd\ntriples = pd.read_csv('SollTripel.csv', sep=',', skip_blank_lines=True,\n skipinitialspace=True)\ntriples.columns = ['triple', 'found']\ntriples = triples['#' not in triples.triple]\nprint(triples)\n", "step-5": "import pandas as pd\n\ntriples = pd.read_csv(\"SollTripel.csv\", sep=\",\", skip_blank_lines=True, skipinitialspace=True)\ntriples.columns = [\"triple\", \"found\"]\ntriples = triples[\"#\" not in triples.triple]\n\nprint(triples)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from bs4 import BeautifulSoup from cybersource.constants import CHECKOUT_BASKET_ID, CHECKOUT_ORDER_NUM, CHECKOUT_SHIPPING_CODE, CHECKOUT_ORDER_ID from cybersource.tests import factories as cs_factories from decimal import Decimal as D from django.core import mail from django.core.urlresolvers import reverse from mock import patch from oscar.core.loading import get_class, get_model from oscar.test import factories from random import randrange from rest_framework.test import APITestCase import datetime import requests # Needed for external calls! Basket = get_model('basket', 'Basket') Product = get_model('catalogue', 'Product') Order = get_model('order', 'Order') class BaseCheckoutTest(APITestCase): fixtures = ['cybersource-test.yaml'] def create_product(self, price=D('10.00')): product = factories.create_product( title='My Product', product_class='My Product Class') record = factories.create_stockrecord( currency='USD', product=product, num_in_stock=10, price_excl_tax=price) factories.create_purchase_info(record) return product def do_add_to_basket(self, product_id, quantity=1): url = reverse('api-basket-add-product') data = { "url": reverse('product-detail', args=[product_id]), "quantity": quantity } return self.client.post(url, data) def do_get_basket(self): url = reverse('api-basket') return self.client.get(url) def do_sign_auth_request(self, basket_id=None, data=None): if data is None: data = { "guest_email": "[email protected]", "basket": reverse('basket-detail', args=[basket_id]), "shipping_address": { "first_name": "fadsf", "last_name": "fad", "line1": "234 5th Ave", "line4": "Manhattan", "postcode": "10001", "state": "NY", "country": reverse('country-detail', args=['US']), "phone_number": "+1 (717) 467-1111", } } url = reverse('cybersource-sign-auth-request') res = self.client.post(url, data, format='json') self.assertEqual(res.status_code, 200) next_year = datetime.date.today().year + 1 cs_data = { 'card_type': '001', 'card_number': '4111111111111111', 
'card_cvn': '123', 'card_expiry_date': '12-{}'.format(next_year), 'bill_to_forename': 'Testy', 'bill_to_surname': 'McUnitTest', 'bill_to_address_line1': '234 5th Ave', 'bill_to_address_line2': 'apt 5', 'bill_to_address_city': 'Manhattan', 'bill_to_address_state': 'NY', 'bill_to_address_postal_code': '10001', 'bill_to_address_country': 'US', 'bill_to_phone': '17174671111', } for field in res.data['fields']: if not field['editable'] or field['key'] not in cs_data: cs_data[field['key']] = field['value'] cs_url = res.data['url'] return cs_url, cs_data def do_cybersource_post(self, cs_url, cs_data): res = requests.post(cs_url, cs_data) self.assertEqual(res.status_code, 200) soup = BeautifulSoup(res.content, 'html.parser') form_data = {} for element in soup.find_all('input'): form_data[element['name']] = element['value'] # We have the data from cybersource, send it to our cybersource callback url = reverse('cybersource-reply') return self.client.post(url, form_data) def check_finished_order(self, number, product_id, quantity=1): # Order exists and was paid for self.assertEqual(Order.objects.all().count(), 1) order = Order.objects.get() self.assertEqual(order.number, number) lines = order.lines.all() self.assertEqual(lines.count(), 1) line = lines[0] self.assertEqual(line.quantity, quantity) self.assertEqual(line.product_id, product_id) payment_events = order.payment_events.filter(event_type__name="Authorise") self.assertEqual(payment_events.count(), 1) self.assertEqual(payment_events[0].amount, order.total_incl_tax) payment_sources = order.sources.all() self.assertEqual(payment_sources.count(), 1) self.assertEqual(payment_sources[0].currency, order.currency) self.assertEqual(payment_sources[0].amount_allocated, order.total_incl_tax) self.assertEqual(payment_sources[0].amount_debited, D('0.00')) self.assertEqual(payment_sources[0].amount_refunded, D('0.00')) transactions = payment_sources[0].transactions.all() self.assertEqual(transactions.count(), 1) 
self.assertEqual(transactions[0].txn_type, 'Authorise') self.assertEqual(transactions[0].amount, order.total_incl_tax) self.assertEqual(transactions[0].status, 'ACCEPT') self.assertEqual(transactions[0].log_field('req_reference_number'), order.number) self.assertEqual(transactions[0].token.card_last4, '1111') self.assertEqual(len(mail.outbox), 1) class CheckoutIntegrationTest(BaseCheckoutTest): """Full Integration Test of Checkout""" def test_checkout_process(self): """Full checkout process using minimal api calls""" product = self.create_product() res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 200) cs_url, cs_data = self.do_sign_auth_request(basket_id) res = self.do_cybersource_post(cs_url, cs_data) self.assertEqual(res.status_code, 302) self.check_finished_order(cs_data['reference_number'], product.id) def test_add_product_during_auth(self): """Test attempting to add a product during the authorize flow""" product = self.create_product() res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] # Adding a product here should succeed res = self.do_add_to_basket(product.id) basket1 = res.data['id'] self.assertEqual(res.status_code, 200) cs_url, cs_data = self.do_sign_auth_request(basket_id) # Adding a product here should go to a new basket, not the one we're auth'ing res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 200) basket2 = res.data['id'] self.assertNotEqual(basket1, basket2) res = self.do_cybersource_post(cs_url, cs_data) self.assertEqual(res.status_code, 302) self.check_finished_order(cs_data['reference_number'], product.id) # Adding a product here should go to basket2, not basket1 res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 200) basket3 = res.data['id'] self.assertEqual(basket2, basket3) def test_pay_for_nothing(self): """Test attempting to pay for an 
empty basket""" res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] data = { "guest_email": "[email protected]", "basket": reverse('basket-detail', args=[basket_id]), "shipping_address": { "first_name": "fadsf", "last_name": "fad", "line1": "234 5th Ave", "line4": "Manhattan", "postcode": "10001", "state": "NY", "country": reverse('country-detail', args=['US']), "phone_number": "+1 (717) 467-1111", } } url = reverse('cybersource-sign-auth-request') res = self.client.post(url, data, format='json') self.assertEqual(res.status_code, 406) def test_manipulate_total_pre_auth(self): """Test attempting to manipulate basket price when requesting an auth form""" product = self.create_product() res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 200) self.assertEqual(res.data['total_incl_tax'], '10.00') url = reverse('cybersource-sign-auth-request') data = { "guest_email": "[email protected]", "basket": reverse('basket-detail', args=[basket_id]), "total": "2.00", # Try and get $10 of product for only $2 "shipping_address": { "first_name": "fadsf", "last_name": "fad", "line1": "234 5th Ave", "line4": "Manhattan", "postcode": "10001", "state": "NY", "country": reverse('country-detail', args=['US']), "phone_number": "+1 (717) 467-1111", } } res = self.client.post(url, data, format='json') self.assertEqual(res.status_code, 406) def test_manipulate_total_during_auth(self): """Test attempting to manipulate basket price when requesting auth from CyberSource""" product = self.create_product() res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 200) self.assertEqual(res.data['total_incl_tax'], '10.00') cs_url, cs_data = self.do_sign_auth_request(basket_id) cs_data['amount'] = '2.00' res = requests.post(cs_url, cs_data) 
self.assertEqual(res.status_code, 403) def test_free_product(self): """Full checkout process using minimal api calls""" product = self.create_product(price=D('0.00')) res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 200) cs_url, cs_data = self.do_sign_auth_request(basket_id) self.assertEqual(cs_data['amount'], '0.00') res = self.do_cybersource_post(cs_url, cs_data) self.assertEqual(res.status_code, 302) self.check_finished_order(cs_data['reference_number'], product.id) class CSReplyViewTest(BaseCheckoutTest): """Test the CybersourceReplyView with fixtured requests""" def prepare_basket(self): """Setup a basket and session like SignAuthorizePaymentFormView would normally""" product = self.create_product() res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 200) session = self.client.session session[CHECKOUT_BASKET_ID] = basket_id session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999)) session[CHECKOUT_SHIPPING_CODE] = 'free-shipping' session.save() return session, basket_id, session[CHECKOUT_ORDER_NUM] @patch('cybersource.signals.order_placed.send') def test_invalid_signature(self, order_placed): """Invalid signature should result in 400 Bad Request""" session, basket_id, order_number = self.prepare_basket() data = cs_factories.build_declined_reply_data(order_number) data = cs_factories.sign_reply_data(data) data['signature'] = 'abcdef' url = reverse('cybersource-reply') resp = self.client.post(url, data) self.assertEqual(resp.status_code, 400) self.assertEqual(len(mail.outbox), 0, 'Should not send email') self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal') self.assertEqual(Order.objects.count(), 0, 'Should not make order') @patch('cybersource.signals.order_placed.send') def test_invalid_request_type(self, 
order_placed): """Bad request type should result in 400 Bad Request""" session, basket_id, order_number = self.prepare_basket() data = cs_factories.build_declined_reply_data(order_number) data["req_transaction_type"] = "payment", data = cs_factories.sign_reply_data(data) url = reverse('cybersource-reply') resp = self.client.post(url, data) self.assertEqual(resp.status_code, 400) self.assertEqual(len(mail.outbox), 0, 'Should not send email') self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal') self.assertEqual(Order.objects.count(), 0, 'Should not make order') @patch('cybersource.signals.order_placed.send') def test_duplicate_transaction_id(self, order_placed): """Duplicate Transaction ID should result in redirect to the success page""" session, basket_id, order_number = self.prepare_basket() data = cs_factories.build_accepted_reply_data(order_number) data = cs_factories.sign_reply_data(data) url = reverse('cybersource-reply') self.assertEqual(order_placed.call_count, 0) self.assertEqual(Order.objects.count(), 0) resp = self.client.post(url, data) self.assertRedirects(resp, reverse('checkout:thank-you')) self.assertEqual(order_placed.call_count, 1) self.assertEqual(Order.objects.count(), 1) resp = self.client.post(url, data) self.assertRedirects(resp, reverse('checkout:thank-you')) self.assertEqual(order_placed.call_count, 1) self.assertEqual(Order.objects.count(), 1) @patch('cybersource.signals.order_placed.send') def test_invalid_reference_number(self, order_placed): """Mismatched reference number should result in 400 Bad Request""" session, basket_id, order_number = self.prepare_basket() data = cs_factories.build_accepted_reply_data(order_number + 'ABC') data = cs_factories.sign_reply_data(data) url = reverse('cybersource-reply') resp = self.client.post(url, data) self.assertEqual(resp.status_code, 400) self.assertEqual(order_placed.call_count, 0) self.assertEqual(Order.objects.count(), 0) @patch('cybersource.signals.order_placed.send') def 
test_missing_basket(self, order_placed): """Missing basket should result in 400 Bad Request""" session, basket_id, order_number = self.prepare_basket() del session[CHECKOUT_BASKET_ID] session.save() data = cs_factories.build_accepted_reply_data(order_number) data = cs_factories.sign_reply_data(data) url = reverse('cybersource-reply') resp = self.client.post(url, data) self.assertEqual(resp.status_code, 400) self.assertEqual(order_placed.call_count, 0) self.assertEqual(Order.objects.count(), 0) @patch('cybersource.signals.order_placed.send') def test_declined_card(self, order_placed): """Declined card should should result in redirect to failure page""" session, basket_id, order_number = self.prepare_basket() data = cs_factories.build_declined_reply_data(order_number) data = cs_factories.sign_reply_data(data) url = reverse('cybersource-reply') resp = self.client.post(url, data) self.assertRedirects(resp, reverse('checkout:index'), fetch_redirect_response=False) self.assertEqual(len(mail.outbox), 0, 'Should not send email') self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal') self.assertEqual(Order.objects.count(), 0, 'Should not make order') @patch('cybersource.signals.order_placed.send') def test_success(self, order_placed): """Successful authorization should create an order and redirect to the success page""" session, basket_id, order_number = self.prepare_basket() data = cs_factories.build_accepted_reply_data(order_number) data = cs_factories.sign_reply_data(data) url = reverse('cybersource-reply') self.assertEqual(order_placed.call_count, 0) resp = self.client.post(url, data) self.assertRedirects(resp, reverse('checkout:thank-you')) self.assertEqual(len(mail.outbox), 1, 'Should send email') self.assertEqual(order_placed.call_count, 1, 'Should trigger order_placed signal') order = order_placed.call_args[1]['order'] self.assertEqual(order.status, 'Authorized', 'Should set order status') self.assertEqual(order.basket.id, basket_id, 'Should use 
basket from session') self.assertEqual(order.number, order_number, 'Should use order number from CS request') session = self.client.session self.assertEquals(session[CHECKOUT_ORDER_ID], order.id, 'Should save order_id in session') self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource') source = order.sources.first() self.assertEqual(source.currency, 'USD') self.assertEqual(source.amount_allocated, D('99.99')) self.assertEqual(source.amount_refunded, D('0.00')) self.assertEqual(source.amount_debited, D('0.00')) self.assertEqual(source.transactions.count(), 1, 'Should save Transaction') transaction = source.transactions.first() self.assertEqual(transaction.log.data, data) self.assertEqual(transaction.token.log, transaction.log) self.assertEqual(transaction.token.masked_card_number, 'xxxxxxxxxxxx1111') self.assertEqual(transaction.token.card_type, '001') self.assertEqual(transaction.txn_type, 'Authorise') self.assertEqual(transaction.amount, D('99.99')) self.assertEqual(transaction.reference, data['transaction_id']) self.assertEqual(transaction.status, 'ACCEPT') self.assertEqual(transaction.request_token, data['request_token']) self.assertEqual(order.payment_events.count(), 1, 'Should save PaymentEvent') event = order.payment_events.first() self.assertEqual(event.amount, D('99.99')) self.assertEqual(event.reference, data['transaction_id']) self.assertEqual(event.event_type.name, 'Authorise') self.assertEqual(event.line_quantities.count(), 1, 'Should save PaymentEventQuantity') lq = event.line_quantities.first() self.assertEqual(lq.line, order.lines.first()) self.assertEqual(lq.quantity, 1) class AuthPaymentFormViewTest(BaseCheckoutTest): """Test the SignAuthorizePaymentFormView""" def prepare_basket(self): """Setup a basket so that we can pay for it""" product = self.create_product() res = self.do_get_basket() self.assertEqual(res.status_code, 200) basket_id = res.data['id'] res = self.do_add_to_basket(product.id) self.assertEqual(res.status_code, 
200) return basket_id @patch('cybersource.signals.pre_build_auth_request.send') @patch('cybersource.signals.pre_calculate_auth_total.send') def test_request_auth_form_success(self, pre_calculate_auth_total, pre_build_auth_request): basket_id = self.prepare_basket() # Add some taxes to the basket def add_taxes(sender, basket, shipping_address, **kwargs): for line in basket.all_lines(): line.purchase_info.price.tax = D('0.42') pre_calculate_auth_total.side_effect = add_taxes # Add an extra field into the request def add_a_field(sender, extra_fields, request, basket, **kwargs): extra_fields['my_custom_field'] = 'ABC' pre_build_auth_request.side_effect = add_a_field # Pregenerate the order number session = self.client.session session[CHECKOUT_ORDER_NUM] = '10000042' session.save() cs_url, data = self.do_sign_auth_request(basket_id=basket_id) # CS URL should be correct self.assertEqual(cs_url, 'https://testsecureacceptance.cybersource.com/silent/pay') # Basket ID should be stored in the session session = self.client.session self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id) # Basket must be frozen basket = Basket.objects.get(id=basket_id) self.assertFalse(basket.can_be_edited) # Make sure each signal got called self.assertEqual(pre_calculate_auth_total.call_count, 1) self.assertEqual(pre_build_auth_request.call_count, 1) # Check response fields self.assertEquals(data['amount'], '10.42') self.assertEquals(data['bill_to_address_city'], 'Manhattan') self.assertEquals(data['bill_to_address_country'], 'US') self.assertEquals(data['bill_to_address_line1'], '234 5th Ave') self.assertEquals(data['bill_to_address_line2'], 'apt 5') self.assertEquals(data['bill_to_address_postal_code'], '10001') self.assertEquals(data['bill_to_address_state'], 'NY') self.assertEquals(data['bill_to_email'], '[email protected]') self.assertEquals(data['bill_to_forename'], 'Testy') self.assertEquals(data['bill_to_phone'], '17174671111') self.assertEquals(data['bill_to_surname'], 'McUnitTest') 
self.assertEquals(data['card_cvn'], '123') self.assertEquals(data['card_expiry_date'], '12-2017') self.assertEquals(data['card_number'], '4111111111111111') self.assertEquals(data['card_type'], '001') self.assertEquals(data['currency'], 'USD') self.assertEquals(data['customer_ip_address'], '127.0.0.1') self.assertEquals(data['device_fingerprint_id'], '') self.assertEquals(data['item_0_name'], 'My Product') self.assertEquals(data['item_0_quantity'], '1') self.assertEquals(data['item_0_sku'], basket.all_lines()[0].stockrecord.partner_sku) self.assertEquals(data['item_0_unit_price'], '10.42') self.assertEquals(data['line_item_count'], '1') self.assertEquals(data['locale'], 'en') self.assertEquals(data['my_custom_field'], 'ABC') self.assertEquals(data['payment_method'], 'card') self.assertEquals(data['reference_number'], '10000042') self.assertEquals(data['ship_to_address_city'], 'Manhattan') self.assertEquals(data['ship_to_address_country'], 'US') self.assertEquals(data['ship_to_address_line1'], '234 5th Ave') self.assertEquals(data['ship_to_address_line2'], '') self.assertEquals(data['ship_to_address_postal_code'], '10001') self.assertEquals(data['ship_to_address_state'], 'NY') self.assertEquals(data['ship_to_forename'], 'fadsf') self.assertEquals(data['ship_to_phone'], '17174671111') self.assertEquals(data['ship_to_surname'], 'fad') self.assertEquals(data['transaction_type'], 'authorization,create_payment_token')
normal
{ "blob_id": "9155b3eed8ac79b94a033801dbf142392b50720b", "index": 5123, "step-1": "<mask token>\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n <mask token>\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': 
'10001',\n 'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n <mask token>\n <mask token>\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, 
order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = 
self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n 
self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, 
data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], 
basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], '[email protected]')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n 
self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n", "step-2": "<mask token>\n\n\nclass BaseCheckoutTest(APITestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def do_get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n <mask token>\n <mask token>\n\n def check_finished_order(self, number, product_id, quantity=1):\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, number)\n lines = order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, quantity)\n self.assertEqual(line.product_id, product_id)\n payment_events = order.payment_events.filter(event_type__name=\n 'Authorise')\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, order.total_incl_tax)\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.\n total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n self.assertEqual(transactions[0].log_field('req_reference_number'),\n order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n self.assertEqual(len(mail.outbox), 1)\n\n\nclass 
CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 
'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n 
cs_url, cs_data = self.do_sign_auth_request(basket_id)\n self.assertEqual(cs_data['amount'], '0.00')\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 
'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, 
order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from 
session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id 
= res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], '[email protected]')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n 
self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n", "step-3": "<mask token>\n\n\nclass BaseCheckoutTest(APITestCase):\n <mask token>\n\n def create_product(self, price=D('10.00')):\n product = factories.create_product(title='My Product',\n product_class='My Product Class')\n record = factories.create_stockrecord(currency='USD', product=\n product, num_in_stock=10, price_excl_tax=price)\n factories.create_purchase_info(record)\n return product\n\n def do_add_to_basket(self, product_id, quantity=1):\n url = reverse('api-basket-add-product')\n data = {'url': reverse('product-detail', 
args=[product_id]),\n 'quantity': quantity}\n return self.client.post(url, data)\n\n def do_get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n\n def do_sign_auth_request(self, basket_id=None, data=None):\n if data is None:\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=[\n 'US']), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 200)\n next_year = datetime.date.today().year + 1\n cs_data = {'card_type': '001', 'card_number': '4111111111111111',\n 'card_cvn': '123', 'card_expiry_date': '12-{}'.format(next_year\n ), 'bill_to_forename': 'Testy', 'bill_to_surname': 'McUnitTest',\n 'bill_to_address_line1': '234 5th Ave', 'bill_to_address_line2':\n 'apt 5', 'bill_to_address_city': 'Manhattan',\n 'bill_to_address_state': 'NY', 'bill_to_address_postal_code':\n '10001', 'bill_to_address_country': 'US', 'bill_to_phone':\n '17174671111'}\n for field in res.data['fields']:\n if not field['editable'] or field['key'] not in cs_data:\n cs_data[field['key']] = field['value']\n cs_url = res.data['url']\n return cs_url, cs_data\n\n def do_cybersource_post(self, cs_url, cs_data):\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 200)\n soup = BeautifulSoup(res.content, 'html.parser')\n form_data = {}\n for element in soup.find_all('input'):\n form_data[element['name']] = element['value']\n url = reverse('cybersource-reply')\n return self.client.post(url, form_data)\n\n def check_finished_order(self, number, product_id, quantity=1):\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, number)\n lines = 
order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, quantity)\n self.assertEqual(line.product_id, product_id)\n payment_events = order.payment_events.filter(event_type__name=\n 'Authorise')\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, order.total_incl_tax)\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.\n total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n self.assertEqual(transactions[0].log_field('req_reference_number'),\n order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n self.assertEqual(len(mail.outbox), 1)\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n 
self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 
'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n self.assertEqual(cs_data['amount'], '0.00')\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n 
session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n 
self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n 
resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n 
self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n 
session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], '[email protected]')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n 
self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n", "step-4": "from bs4 import BeautifulSoup\nfrom cybersource.constants import CHECKOUT_BASKET_ID, CHECKOUT_ORDER_NUM, CHECKOUT_SHIPPING_CODE, CHECKOUT_ORDER_ID\nfrom cybersource.tests import factories as cs_factories\nfrom decimal import Decimal as D\nfrom django.core import mail\nfrom django.core.urlresolvers import reverse\nfrom mock import patch\nfrom oscar.core.loading import get_class, get_model\nfrom oscar.test import factories\nfrom random import randrange\nfrom rest_framework.test import APITestCase\nimport datetime\nimport requests\nBasket = get_model('basket', 'Basket')\nProduct = get_model('catalogue', 'Product')\nOrder = get_model('order', 'Order')\n\n\nclass BaseCheckoutTest(APITestCase):\n fixtures = ['cybersource-test.yaml']\n\n def create_product(self, price=D('10.00')):\n product = factories.create_product(title='My Product',\n product_class='My Product Class')\n record = factories.create_stockrecord(currency='USD', product=\n product, num_in_stock=10, price_excl_tax=price)\n factories.create_purchase_info(record)\n return product\n\n def do_add_to_basket(self, product_id, quantity=1):\n url = reverse('api-basket-add-product')\n data = {'url': reverse('product-detail', args=[product_id]),\n 'quantity': quantity}\n return self.client.post(url, data)\n\n def do_get_basket(self):\n url = 
reverse('api-basket')\n return self.client.get(url)\n\n def do_sign_auth_request(self, basket_id=None, data=None):\n if data is None:\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=[\n 'US']), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 200)\n next_year = datetime.date.today().year + 1\n cs_data = {'card_type': '001', 'card_number': '4111111111111111',\n 'card_cvn': '123', 'card_expiry_date': '12-{}'.format(next_year\n ), 'bill_to_forename': 'Testy', 'bill_to_surname': 'McUnitTest',\n 'bill_to_address_line1': '234 5th Ave', 'bill_to_address_line2':\n 'apt 5', 'bill_to_address_city': 'Manhattan',\n 'bill_to_address_state': 'NY', 'bill_to_address_postal_code':\n '10001', 'bill_to_address_country': 'US', 'bill_to_phone':\n '17174671111'}\n for field in res.data['fields']:\n if not field['editable'] or field['key'] not in cs_data:\n cs_data[field['key']] = field['value']\n cs_url = res.data['url']\n return cs_url, cs_data\n\n def do_cybersource_post(self, cs_url, cs_data):\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 200)\n soup = BeautifulSoup(res.content, 'html.parser')\n form_data = {}\n for element in soup.find_all('input'):\n form_data[element['name']] = element['value']\n url = reverse('cybersource-reply')\n return self.client.post(url, form_data)\n\n def check_finished_order(self, number, product_id, quantity=1):\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, number)\n lines = order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, quantity)\n 
self.assertEqual(line.product_id, product_id)\n payment_events = order.payment_events.filter(event_type__name=\n 'Authorise')\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, order.total_incl_tax)\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.\n total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n self.assertEqual(transactions[0].log_field('req_reference_number'),\n order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n self.assertEqual(len(mail.outbox), 1)\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = 
res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': '[email protected]', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': 
reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n self.assertEqual(cs_data['amount'], '0.00')\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n 
return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n 
self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n 
self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n 
self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n 
self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], '[email protected]')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n 
self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n", "step-5": "from bs4 import BeautifulSoup\nfrom cybersource.constants import CHECKOUT_BASKET_ID, CHECKOUT_ORDER_NUM, CHECKOUT_SHIPPING_CODE, CHECKOUT_ORDER_ID\nfrom cybersource.tests import factories as cs_factories\nfrom decimal import Decimal as D\nfrom django.core import mail\nfrom django.core.urlresolvers import reverse\nfrom mock import patch\nfrom oscar.core.loading import get_class, get_model\nfrom oscar.test import factories\nfrom random import randrange\nfrom rest_framework.test import APITestCase\nimport datetime\nimport requests # Needed for external calls!\n\nBasket = get_model('basket', 'Basket')\nProduct = get_model('catalogue', 'Product')\nOrder = get_model('order', 'Order')\n\n\nclass BaseCheckoutTest(APITestCase):\n fixtures = ['cybersource-test.yaml']\n\n def create_product(self, price=D('10.00')):\n product = factories.create_product(\n title='My Product',\n product_class='My Product Class')\n record = factories.create_stockrecord(\n currency='USD',\n product=product,\n num_in_stock=10,\n price_excl_tax=price)\n factories.create_purchase_info(record)\n return product\n\n def do_add_to_basket(self, product_id, quantity=1):\n url = reverse('api-basket-add-product')\n data = {\n \"url\": reverse('product-detail', args=[product_id]),\n \"quantity\": quantity\n }\n return self.client.post(url, data)\n\n def do_get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n\n def 
do_sign_auth_request(self, basket_id=None, data=None):\n if data is None:\n data = {\n \"guest_email\": \"[email protected]\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 200)\n\n next_year = datetime.date.today().year + 1\n cs_data = {\n 'card_type': '001',\n 'card_number': '4111111111111111',\n 'card_cvn': '123',\n 'card_expiry_date': '12-{}'.format(next_year),\n 'bill_to_forename': 'Testy',\n 'bill_to_surname': 'McUnitTest',\n 'bill_to_address_line1': '234 5th Ave',\n 'bill_to_address_line2': 'apt 5',\n 'bill_to_address_city': 'Manhattan',\n 'bill_to_address_state': 'NY',\n 'bill_to_address_postal_code': '10001',\n 'bill_to_address_country': 'US',\n 'bill_to_phone': '17174671111',\n }\n for field in res.data['fields']:\n if not field['editable'] or field['key'] not in cs_data:\n cs_data[field['key']] = field['value']\n cs_url = res.data['url']\n return cs_url, cs_data\n\n def do_cybersource_post(self, cs_url, cs_data):\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 200)\n\n soup = BeautifulSoup(res.content, 'html.parser')\n form_data = {}\n for element in soup.find_all('input'):\n form_data[element['name']] = element['value']\n\n # We have the data from cybersource, send it to our cybersource callback\n url = reverse('cybersource-reply')\n return self.client.post(url, form_data)\n\n def check_finished_order(self, number, product_id, quantity=1):\n # Order exists and was paid for\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, 
number)\n\n lines = order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, quantity)\n self.assertEqual(line.product_id, product_id)\n\n payment_events = order.payment_events.filter(event_type__name=\"Authorise\")\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, order.total_incl_tax)\n\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n\n self.assertEqual(transactions[0].log_field('req_reference_number'), order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n\n self.assertEqual(len(mail.outbox), 1)\n\n\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = 
self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n # Adding a product here should succeed\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n # Adding a product here should go to a new basket, not the one we're auth'ing\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n # Adding a product here should go to basket2, not basket1\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n data = {\n \"guest_email\": \"[email protected]\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n 
self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n\n url = reverse('cybersource-sign-auth-request')\n data = {\n \"guest_email\": \"[email protected]\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"total\": \"2.00\", # Try and get $10 of product for only $2\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n self.assertEqual(cs_data['amount'], '0.00')\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured 
requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n\n data['signature'] = 'abcdef'\n\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n\n data[\"req_transaction_type\"] = \"payment\",\n\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n 
self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 
400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'), fetch_redirect_response=False)\n\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1, 'Should trigger order_placed signal')\n\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id, 'Should use basket from session')\n self.assertEqual(order.number, order_number, 'Should use order number from CS request')\n\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id, 'Should save order_id in session')\n\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource')\n 
source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n\n self.assertEqual(source.transactions.count(), 1, 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number, 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n\n self.assertEqual(order.payment_events.count(), 1, 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n\n self.assertEqual(event.line_quantities.count(), 1, 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n return basket_id\n\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, 
pre_calculate_auth_total, pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n # Add some taxes to the basket\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n # Add an extra field into the request\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n\n # Pregenerate the order number\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n\n # CS URL should be correct\n self.assertEqual(cs_url, 'https://testsecureacceptance.cybersource.com/silent/pay')\n\n # Basket ID should be stored in the session\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n\n # Basket must be frozen\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n\n # Make sure each signal got called\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n\n # Check response fields\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], '[email protected]')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n 
self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'], 'authorization,create_payment_token')\n", "step-ids": [ 19, 25, 29, 32, 33 ] }
[ 19, 25, 29, 32, 33 ]
# -*- coding: utf-8 -*- from copy import copy from openprocurement.api.utils import ( json_view, context_unpack, APIResource, get_now, ) from openprocurement.tender.core.utils import save_tender, apply_patch from openprocurement.tender.core.validation import ( validate_requirement_data, validate_patch_requirement_data, validate_operation_ecriteria_objects, validate_patch_exclusion_ecriteria_objects, validate_change_requirement_objects, validate_put_requirement_objects, ) class BaseTenderCriteriaRGRequirementResource(APIResource): @json_view( content_type="application/json", validators=( validate_operation_ecriteria_objects, validate_patch_exclusion_ecriteria_objects, validate_requirement_data, ), permission="edit_tender" ) def collection_post(self): requirement = self.request.validated["requirement"] self.request.context.requirements.append(requirement) tender = self.request.validated["tender"] if ( self.request.authenticated_role == "tender_owner" and tender.status == "active.tendering" and hasattr(tender, "invalidate_bids_data") ): tender.invalidate_bids_data() if save_tender(self.request): self.LOGGER.info( "Created requirement group requirement {}".format(requirement.id), extra=context_unpack( self.request, {"MESSAGE_ID": "requirement_group_requirement_create"}, {"requirement_id": requirement.id}, ), ) self.request.response.status = 201 self.request.response.headers["Location"] = self.request.route_url( "{}:Requirement Group Requirement".format(self.request.validated["tender"].procurementMethodType), tender_id=self.request.validated["tender_id"], criterion_id=self.request.validated["criterion"].id, requirement_group_id=self.request.validated["requirement_group"].id, requirement_id=requirement.id ) return {"data": requirement.serialize("view")} @json_view(permission="view_tender") def collection_get(self): return {"data": [i.serialize("view") for i in self.request.context.requirements]} @json_view(permission="view_tender") def get(self): return {"data": 
self.request.validated["requirement"].serialize("view")} @json_view( content_type="application/json", validators=( validate_change_requirement_objects, validate_patch_requirement_data, ), permission="edit_tender" ) def patch(self): requirement = self.request.context apply_patch(self.request, save=False, src=requirement.serialize()) tender = self.request.validated["tender"] if self.request.authenticated_role == "tender_owner" and hasattr(tender, "invalidate_bids_data"): tender.invalidate_bids_data() if save_tender(self.request): self.LOGGER.info( "Updated {}".format(requirement.id), extra=context_unpack(self.request, {"MESSAGE_ID": "requirement_group_requirement_patch"}), ) return {"data": requirement.serialize("view")} @json_view( content_type="application/json", validators=( validate_put_requirement_objects, validate_patch_requirement_data, ), permission="edit_tender" ) def put(self): old_requirement = self.request.context requirement = old_requirement if self.request.validated["data"].get("status") != "cancelled": model = type(old_requirement) data = copy(self.request.validated["data"]) for attr_name in type(old_requirement)._fields: if data.get(attr_name) is None: data[attr_name] = getattr(old_requirement, attr_name) # To avoid new version creation if no changes and only id's were regenerated if "eligibleEvidences" not in self.request.json.get("data", {}): data["eligibleEvidences"] = [ evidence.to_primitive(role="create") for evidence in getattr(old_requirement, "eligibleEvidences") ] requirement = model(data) if old_requirement.to_primitive() == requirement.to_primitive(): return {"data": (old_requirement.serialize("view"),)} requirement.datePublished = get_now() requirement.dateModified = None self.request.validated["requirement_group"].requirements.append(requirement) if old_requirement.status == "active": old_requirement.status = "cancelled" old_requirement.dateModified = get_now() tender = self.request.validated["tender"] if ( 
self.request.authenticated_role == "tender_owner" and tender.status == "active.tendering" and hasattr(tender, "invalidate_bids_data") ): tender.invalidate_bids_data() if save_tender(self.request): self.LOGGER.info( "New version of requirement {}".format(requirement.id), extra=context_unpack(self.request, {"MESSAGE_ID": "requirement_group_requirement_put"}), ) return {"data": (requirement.serialize("view"), old_requirement.serialize("view_old"))}
normal
{ "blob_id": "6194079dd506553b4e5b66f1fb92bb8642704b59", "index": 6893, "step-1": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n <mask token>\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n <mask token>\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified 
= None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n", "step-2": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n 
request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n <mask token>\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n 
self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n", "step-3": "<mask token>\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n 
request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n\n @json_view(permission='view_tender')\n def get(self):\n return {'data': self.request.validated['requirement'].serialize('view')\n }\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data = copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': 
(old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n", "step-4": "from copy import copy\nfrom openprocurement.api.utils import json_view, context_unpack, APIResource, get_now\nfrom openprocurement.tender.core.utils import save_tender, apply_patch\nfrom openprocurement.tender.core.validation import validate_requirement_data, validate_patch_requirement_data, validate_operation_ecriteria_objects, validate_patch_exclusion_ecriteria_objects, validate_change_requirement_objects, validate_put_requirement_objects\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(content_type='application/json', validators=(\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data), permission='edit_tender')\n def collection_post(self):\n requirement = self.request.validated['requirement']\n self.request.context.requirements.append(requirement)\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Created requirement 
group requirement {}'.\n format(requirement.id), extra=context_unpack(self.request,\n {'MESSAGE_ID': 'requirement_group_requirement_create'}, {\n 'requirement_id': requirement.id}))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url(\n '{}:Requirement Group Requirement'.format(self.request.\n validated['tender'].procurementMethodType), tender_id=self.\n request.validated['tender_id'], criterion_id=self.request.\n validated['criterion'].id, requirement_group_id=self.\n request.validated['requirement_group'].id, requirement_id=\n requirement.id)\n return {'data': requirement.serialize('view')}\n\n @json_view(permission='view_tender')\n def collection_get(self):\n return {'data': [i.serialize('view') for i in self.request.context.\n requirements]}\n\n @json_view(permission='view_tender')\n def get(self):\n return {'data': self.request.validated['requirement'].serialize('view')\n }\n\n @json_view(content_type='application/json', validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data), permission='edit_tender')\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated['tender']\n if self.request.authenticated_role == 'tender_owner' and hasattr(tender\n , 'invalidate_bids_data'):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('Updated {}'.format(requirement.id), extra=\n context_unpack(self.request, {'MESSAGE_ID':\n 'requirement_group_requirement_patch'}))\n return {'data': requirement.serialize('view')}\n\n @json_view(content_type='application/json', validators=(\n validate_put_requirement_objects, validate_patch_requirement_data),\n permission='edit_tender')\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated['data'].get('status') != 'cancelled':\n model = type(old_requirement)\n data 
= copy(self.request.validated['data'])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n if 'eligibleEvidences' not in self.request.json.get('data', {}):\n data['eligibleEvidences'] = [evidence.to_primitive(role=\n 'create') for evidence in getattr(old_requirement,\n 'eligibleEvidences')]\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {'data': (old_requirement.serialize('view'),)}\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated['requirement_group'].requirements.append(\n requirement)\n if old_requirement.status == 'active':\n old_requirement.status = 'cancelled'\n old_requirement.dateModified = get_now()\n tender = self.request.validated['tender']\n if (self.request.authenticated_role == 'tender_owner' and tender.\n status == 'active.tendering' and hasattr(tender,\n 'invalidate_bids_data')):\n tender.invalidate_bids_data()\n if save_tender(self.request):\n self.LOGGER.info('New version of requirement {}'.format(\n requirement.id), extra=context_unpack(self.request, {\n 'MESSAGE_ID': 'requirement_group_requirement_put'}))\n return {'data': (requirement.serialize('view'), old_requirement\n .serialize('view_old'))}\n", "step-5": "# -*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom openprocurement.api.utils import (\n json_view,\n context_unpack,\n APIResource,\n get_now,\n)\nfrom openprocurement.tender.core.utils import save_tender, apply_patch\nfrom openprocurement.tender.core.validation import (\n validate_requirement_data,\n validate_patch_requirement_data,\n validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_change_requirement_objects,\n validate_put_requirement_objects,\n)\n\n\nclass BaseTenderCriteriaRGRequirementResource(APIResource):\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n 
validate_operation_ecriteria_objects,\n validate_patch_exclusion_ecriteria_objects,\n validate_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def collection_post(self):\n\n requirement = self.request.validated[\"requirement\"]\n self.request.context.requirements.append(requirement)\n tender = self.request.validated[\"tender\"]\n if (\n self.request.authenticated_role == \"tender_owner\"\n and tender.status == \"active.tendering\"\n and hasattr(tender, \"invalidate_bids_data\")\n ):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"Created requirement group requirement {}\".format(requirement.id),\n extra=context_unpack(\n self.request,\n {\"MESSAGE_ID\": \"requirement_group_requirement_create\"},\n {\"requirement_id\": requirement.id},\n ),\n )\n self.request.response.status = 201\n self.request.response.headers[\"Location\"] = self.request.route_url(\n \"{}:Requirement Group Requirement\".format(self.request.validated[\"tender\"].procurementMethodType),\n tender_id=self.request.validated[\"tender_id\"],\n criterion_id=self.request.validated[\"criterion\"].id,\n requirement_group_id=self.request.validated[\"requirement_group\"].id,\n requirement_id=requirement.id\n )\n return {\"data\": requirement.serialize(\"view\")}\n\n @json_view(permission=\"view_tender\")\n def collection_get(self):\n return {\"data\": [i.serialize(\"view\") for i in self.request.context.requirements]}\n\n @json_view(permission=\"view_tender\")\n def get(self):\n return {\"data\": self.request.validated[\"requirement\"].serialize(\"view\")}\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_change_requirement_objects,\n validate_patch_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def patch(self):\n requirement = self.request.context\n apply_patch(self.request, save=False, src=requirement.serialize())\n tender = self.request.validated[\"tender\"]\n\n if self.request.authenticated_role == \"tender_owner\" 
and hasattr(tender, \"invalidate_bids_data\"):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"Updated {}\".format(requirement.id),\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_group_requirement_patch\"}),\n )\n return {\"data\": requirement.serialize(\"view\")}\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_put_requirement_objects,\n validate_patch_requirement_data,\n ),\n permission=\"edit_tender\"\n )\n def put(self):\n old_requirement = self.request.context\n requirement = old_requirement\n if self.request.validated[\"data\"].get(\"status\") != \"cancelled\":\n model = type(old_requirement)\n data = copy(self.request.validated[\"data\"])\n for attr_name in type(old_requirement)._fields:\n if data.get(attr_name) is None:\n data[attr_name] = getattr(old_requirement, attr_name)\n # To avoid new version creation if no changes and only id's were regenerated\n if \"eligibleEvidences\" not in self.request.json.get(\"data\", {}):\n data[\"eligibleEvidences\"] = [\n evidence.to_primitive(role=\"create\") for evidence in getattr(old_requirement, \"eligibleEvidences\")\n ]\n\n requirement = model(data)\n if old_requirement.to_primitive() == requirement.to_primitive():\n return {\"data\": (old_requirement.serialize(\"view\"),)}\n\n requirement.datePublished = get_now()\n requirement.dateModified = None\n self.request.validated[\"requirement_group\"].requirements.append(requirement)\n\n if old_requirement.status == \"active\":\n old_requirement.status = \"cancelled\"\n old_requirement.dateModified = get_now()\n\n tender = self.request.validated[\"tender\"]\n if (\n self.request.authenticated_role == \"tender_owner\"\n and tender.status == \"active.tendering\"\n and hasattr(tender, \"invalidate_bids_data\")\n ):\n tender.invalidate_bids_data()\n\n if save_tender(self.request):\n self.LOGGER.info(\n \"New version of requirement {}\".format(requirement.id),\n 
extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_group_requirement_put\"}),\n )\n return {\"data\": (requirement.serialize(\"view\"), old_requirement.serialize(\"view_old\"))}\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
#!/usr/bin/python3 """ This module contains a Fabric function definition. """ from datetime import datetime, time from fabric.api import * from pathlib import Path def do_pack(): timestamp = datetime.utcnow().strftime("%Y%m%d%H%M%S") archive = "web_static_" + timestamp + ".tgz" local("mkdir -p versions") local("tar -cvzf versions/{} web_static/".format(archive)) my_file = Path("versions/{}".format(archive)) if my_file.is_file(): return my_file else: return None
normal
{ "blob_id": "6f3de70267956a6c7c3c5b261cf591051de4c548", "index": 1968, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef do_pack():\n timestamp = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n archive = 'web_static_' + timestamp + '.tgz'\n local('mkdir -p versions')\n local('tar -cvzf versions/{} web_static/'.format(archive))\n my_file = Path('versions/{}'.format(archive))\n if my_file.is_file():\n return my_file\n else:\n return None\n", "step-3": "<mask token>\nfrom datetime import datetime, time\nfrom fabric.api import *\nfrom pathlib import Path\n\n\ndef do_pack():\n timestamp = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n archive = 'web_static_' + timestamp + '.tgz'\n local('mkdir -p versions')\n local('tar -cvzf versions/{} web_static/'.format(archive))\n my_file = Path('versions/{}'.format(archive))\n if my_file.is_file():\n return my_file\n else:\n return None\n", "step-4": "#!/usr/bin/python3\n\"\"\"\n This module contains a Fabric function definition.\n\"\"\"\nfrom datetime import datetime, time\nfrom fabric.api import *\nfrom pathlib import Path\n\n\ndef do_pack():\n timestamp = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n archive = \"web_static_\" + timestamp + \".tgz\"\n\n local(\"mkdir -p versions\")\n local(\"tar -cvzf versions/{} web_static/\".format(archive))\n\n my_file = Path(\"versions/{}\".format(archive))\n if my_file.is_file():\n return my_file\n else:\n return None\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Codec: def serialize(self, root): """Encodes a tree to a single string. :type root: TreeNode :rtype: str """ if(not root) : return "X" else : return ",".join([str(root.val), self.serialize(root.left), self.serialize(root.right)]) # Q = [root] # res = [] # while(Q) : # newQ = [] # noChange = True # while(Q) : # v = Q.pop(0) # if(v == None) : # res.append(' ') # newQ.append(None) # newQ.append(None) # else : # res.append(str(v.val)) # if(v.left == None) : # newQ.append(None) # else : # noChange = False # newQ.append(v.left) # if(v.right == None) : # newQ.append(None) # else : # noChange = False # newQ.append(v.right) # if(noChange) : # break # Q = newQ # return ','.join(res) def deserialize(self, data): """Decodes your encoded data to tree. :type data: str :rtype: TreeNode """ self.data = data if(data[0] == "X") : return None else : t = TreeNode(int(self.data[: self.data.find(",")])) t.left = self.deserialize(self.data[self.data.find(",") + 1 :]) t.right = self.deserialize(self.data[self.data.find(",") + 1 :]) return t # arr = data.split(",") # l = len(arr) # if(l == 0 or arr[0] == " ") : # return None # t = TreeNode(int(arr[0])) # Q = [t] # half = (l + 1) / 2 - 1 # i = 0 # while(i < half) : # v = Q.pop(0) # if(v == None) : # i += 1 # Q.append(None) # Q.append(None) # continue # if(arr[2 * i + 1] == ' ') : # v.left = None # Q.append(None) # else : # l = TreeNode(int(arr[2 * i + 1])) # v.left = l # Q.append(l) # if(arr[2 * i + 2] == ' ') : # v.right = None # Q.append(None) # else : # r = TreeNode(int(arr[2 * i + 2])) # v.right = r # Q.append(r) # i += 1 # return t # Your Codec object will be instantiated and called as such: # ser = Codec() # deser = Codec() # ans = deser.deserialize(ser.serialize(root))
normal
{ "blob_id": "006e1088e72201fab7eebd1409c025b5dba69403", "index": 5938, "step-1": "<mask token>\n", "step-2": "class Codec:\n <mask token>\n <mask token>\n", "step-3": "class Codec:\n <mask token>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n self.data = data\n if data[0] == 'X':\n return None\n else:\n t = TreeNode(int(self.data[:self.data.find(',')]))\n t.left = self.deserialize(self.data[self.data.find(',') + 1:])\n t.right = self.deserialize(self.data[self.data.find(',') + 1:])\n return t\n", "step-4": "class Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n \n :type root: TreeNode\n :rtype: str\n \"\"\"\n if not root:\n return 'X'\n else:\n return ','.join([str(root.val), self.serialize(root.left), self\n .serialize(root.right)])\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n self.data = data\n if data[0] == 'X':\n return None\n else:\n t = TreeNode(int(self.data[:self.data.find(',')]))\n t.left = self.deserialize(self.data[self.data.find(',') + 1:])\n t.right = self.deserialize(self.data[self.data.find(',') + 1:])\n return t\n", "step-5": "# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n \n :type root: TreeNode\n :rtype: str\n \"\"\"\n if(not root) :\n return \"X\"\n else :\n return \",\".join([str(root.val), self.serialize(root.left), self.serialize(root.right)])\n \n \n# Q = [root]\n# res = []\n# while(Q) :\n# newQ = []\n# noChange = True\n# while(Q) :\n# v = Q.pop(0)\n# if(v == None) :\n# res.append(' ')\n# newQ.append(None)\n# newQ.append(None)\n# else :\n# res.append(str(v.val))\n \n# if(v.left == None) :\n# newQ.append(None)\n# else :\n# noChange = False\n# newQ.append(v.left) 
\n \n# if(v.right == None) :\n# newQ.append(None)\n# else :\n# noChange = False\n# newQ.append(v.right)\n\n \n# if(noChange) :\n# break\n# Q = newQ\n# return ','.join(res)\n \n \n \n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n self.data = data\n \n if(data[0] == \"X\") :\n return None\n else :\n t = TreeNode(int(self.data[: self.data.find(\",\")]))\n t.left = self.deserialize(self.data[self.data.find(\",\") + 1 :])\n t.right = self.deserialize(self.data[self.data.find(\",\") + 1 :])\n return t\n \n \n \n# arr = data.split(\",\")\n \n# l = len(arr)\n \n# if(l == 0 or arr[0] == \" \") :\n# return None\n \n# t = TreeNode(int(arr[0]))\n \n# Q = [t]\n \n# half = (l + 1) / 2 - 1\n \n# i = 0\n \n \n# while(i < half) :\n# v = Q.pop(0)\n# if(v == None) :\n# i += 1\n# Q.append(None)\n# Q.append(None)\n# continue\n \n# if(arr[2 * i + 1] == ' ') :\n# v.left = None\n# Q.append(None)\n# else :\n# l = TreeNode(int(arr[2 * i + 1]))\n# v.left = l\n# Q.append(l)\n# if(arr[2 * i + 2] == ' ') :\n# v.right = None\n# Q.append(None)\n# else :\n# r = TreeNode(int(arr[2 * i + 2]))\n# v.right = r\n# Q.append(r)\n# i += 1\n# return t\n \n \n\n# Your Codec object will be instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# ans = deser.deserialize(ser.serialize(root))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part # of the Robot Operating System project, released under the MIT License. Please # see the LICENSE file included as part of this package. # # author: Murray Altheim # created: 2020-04-15 # modified: 2020-04-15 import pprint from colorama import init, Fore, Style init() try: import yaml except ImportError: exit("This script requires the pyyaml module\nInstall with: pip3 install --user pyyaml") from core.logger import Level, Logger class ConfigLoader(): ''' Has just one method: configure() reads a YAML file. ''' def __init__(self, level): self._log = Logger('configloader', level) self._log.info('ready.') # .......................................................................... def configure(self, filename='config.yaml'): ''' Read and return configuration from the specified YAML file. Pretty-prints the configuration object if the log level is set to DEBUG. ''' self._log.info('reading from yaml configuration file {}...'.format(filename)) _config = yaml.safe_load(open(filename, 'r')) if self._log.level == Level.DEBUG: self._log.debug('YAML configuration as read:') print(Fore.BLUE) pp = pprint.PrettyPrinter(width=80, indent=2) pp.pprint(_config) print(Style.RESET_ALL) self._log.info('configuration read.') return _config #EOF
normal
{ "blob_id": "3a6038cb80548b98fc7e4a328092f1dc1ffd6dfd", "index": 1154, "step-1": "<mask token>\n\n\nclass ConfigLoader:\n <mask token>\n\n def __init__(self, level):\n self._log = Logger('configloader', level)\n self._log.info('ready.')\n\n def configure(self, filename='config.yaml'):\n \"\"\"\n Read and return configuration from the specified YAML file.\n\n Pretty-prints the configuration object if the log level is set to DEBUG.\n \"\"\"\n self._log.info('reading from yaml configuration file {}...'.format(\n filename))\n _config = yaml.safe_load(open(filename, 'r'))\n if self._log.level == Level.DEBUG:\n self._log.debug('YAML configuration as read:')\n print(Fore.BLUE)\n pp = pprint.PrettyPrinter(width=80, indent=2)\n pp.pprint(_config)\n print(Style.RESET_ALL)\n self._log.info('configuration read.')\n return _config\n", "step-2": "<mask token>\n\n\nclass ConfigLoader:\n \"\"\"\n Has just one method: configure() reads a YAML file.\n \"\"\"\n\n def __init__(self, level):\n self._log = Logger('configloader', level)\n self._log.info('ready.')\n\n def configure(self, filename='config.yaml'):\n \"\"\"\n Read and return configuration from the specified YAML file.\n\n Pretty-prints the configuration object if the log level is set to DEBUG.\n \"\"\"\n self._log.info('reading from yaml configuration file {}...'.format(\n filename))\n _config = yaml.safe_load(open(filename, 'r'))\n if self._log.level == Level.DEBUG:\n self._log.debug('YAML configuration as read:')\n print(Fore.BLUE)\n pp = pprint.PrettyPrinter(width=80, indent=2)\n pp.pprint(_config)\n print(Style.RESET_ALL)\n self._log.info('configuration read.')\n return _config\n", "step-3": "<mask token>\ninit()\ntry:\n import yaml\nexcept ImportError:\n exit(\n 'This script requires the pyyaml module\\nInstall with: pip3 install --user pyyaml'\n )\n<mask token>\n\n\nclass ConfigLoader:\n \"\"\"\n Has just one method: configure() reads a YAML file.\n \"\"\"\n\n def __init__(self, level):\n self._log = 
Logger('configloader', level)\n self._log.info('ready.')\n\n def configure(self, filename='config.yaml'):\n \"\"\"\n Read and return configuration from the specified YAML file.\n\n Pretty-prints the configuration object if the log level is set to DEBUG.\n \"\"\"\n self._log.info('reading from yaml configuration file {}...'.format(\n filename))\n _config = yaml.safe_load(open(filename, 'r'))\n if self._log.level == Level.DEBUG:\n self._log.debug('YAML configuration as read:')\n print(Fore.BLUE)\n pp = pprint.PrettyPrinter(width=80, indent=2)\n pp.pprint(_config)\n print(Style.RESET_ALL)\n self._log.info('configuration read.')\n return _config\n", "step-4": "import pprint\nfrom colorama import init, Fore, Style\ninit()\ntry:\n import yaml\nexcept ImportError:\n exit(\n 'This script requires the pyyaml module\\nInstall with: pip3 install --user pyyaml'\n )\nfrom core.logger import Level, Logger\n\n\nclass ConfigLoader:\n \"\"\"\n Has just one method: configure() reads a YAML file.\n \"\"\"\n\n def __init__(self, level):\n self._log = Logger('configloader', level)\n self._log.info('ready.')\n\n def configure(self, filename='config.yaml'):\n \"\"\"\n Read and return configuration from the specified YAML file.\n\n Pretty-prints the configuration object if the log level is set to DEBUG.\n \"\"\"\n self._log.info('reading from yaml configuration file {}...'.format(\n filename))\n _config = yaml.safe_load(open(filename, 'r'))\n if self._log.level == Level.DEBUG:\n self._log.debug('YAML configuration as read:')\n print(Fore.BLUE)\n pp = pprint.PrettyPrinter(width=80, indent=2)\n pp.pprint(_config)\n print(Style.RESET_ALL)\n self._log.info('configuration read.')\n return _config\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part\n# of the Robot Operating System project, released under the MIT License. 
Please\n# see the LICENSE file included as part of this package.\n#\n# author: Murray Altheim\n# created: 2020-04-15\n# modified: 2020-04-15\n\nimport pprint\nfrom colorama import init, Fore, Style\ninit()\ntry:\n import yaml\nexcept ImportError:\n exit(\"This script requires the pyyaml module\\nInstall with: pip3 install --user pyyaml\")\n\nfrom core.logger import Level, Logger\n\nclass ConfigLoader():\n '''\n Has just one method: configure() reads a YAML file.\n '''\n def __init__(self, level):\n self._log = Logger('configloader', level)\n self._log.info('ready.')\n\n # ..........................................................................\n def configure(self, filename='config.yaml'):\n '''\n Read and return configuration from the specified YAML file.\n\n Pretty-prints the configuration object if the log level is set to DEBUG.\n '''\n self._log.info('reading from yaml configuration file {}...'.format(filename))\n _config = yaml.safe_load(open(filename, 'r'))\n if self._log.level == Level.DEBUG:\n self._log.debug('YAML configuration as read:')\n print(Fore.BLUE)\n pp = pprint.PrettyPrinter(width=80, indent=2)\n pp.pprint(_config)\n print(Style.RESET_ALL)\n self._log.info('configuration read.')\n return _config\n\n#EOF\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
""" Constant types in Python. 定数上書きチェック用 """ import os from common import const from datetime import timedelta from linebot.models import ( TemplateSendMessage, CarouselTemplate, CarouselColumn, MessageAction, QuickReplyButton, CameraAction, CameraRollAction, LocationAction ) const.API_PROFILE_URL = 'https://api.line.me/v2/profile' const.API_NOTIFICATIONTOKEN_URL = 'https://api.line.me/message/v3/notifier/token' # noqa: E501 const.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken' const.API_SENDSERVICEMESSAGE_URL = 'https://api.line.me/message/v3/notifier/send?target=service' # noqa 501 const.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify' const.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー' const.DATA_LIMIT_TIME = 60 * 60 * 12 const.ONE_WEEK = timedelta(days=7) const.JST_UTC_TIMEDELTA = timedelta(hours=9) const.FLEX = { "type": "flex", "altText": "Flex Message", "contents": { "type": "bubble", "hero": { "type": "image", "url": "https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901", # noqa:E501 "size": "full", "aspectRatio": "1:1", "aspectMode": "cover", "action": { "type": "uri", "label": "UseCase Cafe", "uri": "https://line.me/ja/" } }, "body": { "type": "box", "layout": "vertical", "contents": [ { "type": "text", "text": "LINE Cafe", "size": "xl", "weight": "bold" }, { "type": "box", "layout": "baseline", "margin": "md", "contents": [ { "type": "icon", "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501 "size": "sm" }, { "type": "icon", "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501 "size": "sm" }, { "type": "icon", "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501 "size": "sm" }, { "type": "icon", "url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501 "size": "sm" }, { "type": "icon", "url": 
"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png", # noqa:E501 "size": "sm" }, { "type": "text", "text": "4.0", "flex": 0, "margin": "md", "size": "sm", "color": "#999999" } ] }, { "type": "box", "layout": "vertical", "spacing": "sm", "margin": "lg", "contents": [ { "type": "box", "layout": "baseline", "spacing": "sm", "contents": [ { "type": "text", "text": "Place", "flex": 1, "size": "sm", "color": "#AAAAAA" }, { "type": "text", "text": "Miraina Tower, 4-1-6 Shinjuku, Tokyo", # noqa:E501 "flex": 5, "size": "sm", "color": "#666666", "wrap": True } ] }, { "type": "box", "layout": "baseline", "spacing": "sm", "contents": [ { "type": "text", "text": "Time", "flex": 1, "size": "sm", "color": "#AAAAAA" }, { "type": "text", "text": "10:00 - 23:00", "flex": 5, "size": "sm", "color": "#666666", "wrap": True } ] } ] } ] }, "footer": { "type": "box", "layout": "vertical", "flex": 0, "spacing": "sm", "contents": [ { "type": "button", "action": { "type": "uri", "label": "WEBサイト", "uri": "https://line.me/ja/" }, "height": "sm", "style": "link" }, { "type": "button", "action": { "type": "datetimepicker", "label": "予約", "data": "action=reserve", "mode": "datetime", "initial": "2020-01-01t00:00", "max": "2020-12-31t23:59", "min": "2020-01-01t00:00" }, "height": "sm", "style": "link" }, { "type": "button", "action": { "type": "postback", "label": "クイックアクション", "data": "action=quick_reply", }, "height": "sm", "style": "link" }, { "type": "spacer", "size": "sm" } ] } } } const.CAROUSEL = TemplateSendMessage( alt_text='Carousel template', template=CarouselTemplate( columns=[ CarouselColumn( thumbnail_image_url='https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186', # noqa:E501 title='最大80%OFF', text='期間限定SALE', actions=[ MessageAction( label='Go to SALE', text='Choose SALE' ) ] ), CarouselColumn( 
thumbnail_image_url='https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654', # noqa:E501 title='今月のおススメ商品', text='これがあれば困らない!', actions=[ MessageAction( label='Recommended', text='Choose Recommended' ) ] ), CarouselColumn( thumbnail_image_url='https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694', # noqa:E501 title='スッキリ収納特集', text='大切なお洋服をスッキリ簡単に収納します', actions=[ MessageAction( label='To receive clothes', text='Choose receive clothes' ) ] ) ] ) ) const.QUICK_REPLY_ITEMS = [ QuickReplyButton(action=LocationAction(label='位置情報')), QuickReplyButton(action=CameraAction(label='カメラ起動')), QuickReplyButton(action=CameraRollAction(label='カメラロール起動')), ] const.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None), 'carousel': os.getenv('RICH_MENU_CAROUSEL', None), 'flex': os.getenv('RICH_MENU_FLEX', None) }
normal
{ "blob_id": "25fcf162306b3d6d6307e703a7d829754cba2778", "index": 2347, "step-1": "<mask token>\n", "step-2": "<mask token>\nconst.API_PROFILE_URL = 'https://api.line.me/v2/profile'\nconst.API_NOTIFICATIONTOKEN_URL = (\n 'https://api.line.me/message/v3/notifier/token')\nconst.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'\nconst.API_SENDSERVICEMESSAGE_URL = (\n 'https://api.line.me/message/v3/notifier/send?target=service')\nconst.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify'\nconst.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'\nconst.DATA_LIMIT_TIME = 60 * 60 * 12\nconst.ONE_WEEK = timedelta(days=7)\nconst.JST_UTC_TIMEDELTA = timedelta(hours=9)\nconst.FLEX = {'type': 'flex', 'altText': 'Flex Message', 'contents': {\n 'type': 'bubble', 'hero': {'type': 'image', 'url':\n 'https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901'\n , 'size': 'full', 'aspectRatio': '1:1', 'aspectMode': 'cover', 'action':\n {'type': 'uri', 'label': 'UseCase Cafe', 'uri': 'https://line.me/ja/'}},\n 'body': {'type': 'box', 'layout': 'vertical', 'contents': [{'type':\n 'text', 'text': 'LINE Cafe', 'size': 'xl', 'weight': 'bold'}, {'type':\n 'box', 'layout': 'baseline', 'margin': 'md', 'contents': [{'type':\n 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png'\n , 'size': 'sm'}, {'type': 'text', 'text': '4.0', 'flex': 0, 'margin':\n 'md', 'size': 'sm', 'color': '#999999'}]}, {'type': 'box', 'layout':\n 'vertical', 
'spacing': 'sm', 'margin': 'lg', 'contents': [{'type':\n 'box', 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type':\n 'text', 'text': 'Place', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'},\n {'type': 'text', 'text': 'Miraina Tower, 4-1-6 Shinjuku, Tokyo', 'flex':\n 5, 'size': 'sm', 'color': '#666666', 'wrap': True}]}, {'type': 'box',\n 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type': 'text',\n 'text': 'Time', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'}, {'type':\n 'text', 'text': '10:00 - 23:00', 'flex': 5, 'size': 'sm', 'color':\n '#666666', 'wrap': True}]}]}]}, 'footer': {'type': 'box', 'layout':\n 'vertical', 'flex': 0, 'spacing': 'sm', 'contents': [{'type': 'button',\n 'action': {'type': 'uri', 'label': 'WEBサイト', 'uri':\n 'https://line.me/ja/'}, 'height': 'sm', 'style': 'link'}, {'type':\n 'button', 'action': {'type': 'datetimepicker', 'label': '予約', 'data':\n 'action=reserve', 'mode': 'datetime', 'initial': '2020-01-01t00:00',\n 'max': '2020-12-31t23:59', 'min': '2020-01-01t00:00'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'button', 'action': {'type': 'postback',\n 'label': 'クイックアクション', 'data': 'action=quick_reply'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'spacer', 'size': 'sm'}]}}}\nconst.CAROUSEL = TemplateSendMessage(alt_text='Carousel template', template\n =CarouselTemplate(columns=[CarouselColumn(thumbnail_image_url=\n 'https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186'\n , title='最大80%OFF', text='期間限定SALE', actions=[MessageAction(label=\n 'Go to SALE', text='Choose SALE')]), CarouselColumn(thumbnail_image_url\n =\n 'https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654'\n , title='今月のおススメ商品', text='これがあれば困らない!', actions=[MessageAction(label=\n 'Recommended', text='Choose Recommended')]), CarouselColumn(\n thumbnail_image_url=\n 
'https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694'\n , title='スッキリ収納特集', text='大切なお洋服をスッキリ簡単に収納します', actions=[MessageAction(\n label='To receive clothes', text='Choose receive clothes')])]))\nconst.QUICK_REPLY_ITEMS = [QuickReplyButton(action=LocationAction(label=\n '位置情報')), QuickReplyButton(action=CameraAction(label='カメラ起動')),\n QuickReplyButton(action=CameraRollAction(label='カメラロール起動'))]\nconst.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),\n 'carousel': os.getenv('RICH_MENU_CAROUSEL', None), 'flex': os.getenv(\n 'RICH_MENU_FLEX', None)}\n", "step-3": "<mask token>\nimport os\nfrom common import const\nfrom datetime import timedelta\nfrom linebot.models import TemplateSendMessage, CarouselTemplate, CarouselColumn, MessageAction, QuickReplyButton, CameraAction, CameraRollAction, LocationAction\nconst.API_PROFILE_URL = 'https://api.line.me/v2/profile'\nconst.API_NOTIFICATIONTOKEN_URL = (\n 'https://api.line.me/message/v3/notifier/token')\nconst.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'\nconst.API_SENDSERVICEMESSAGE_URL = (\n 'https://api.line.me/message/v3/notifier/send?target=service')\nconst.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify'\nconst.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'\nconst.DATA_LIMIT_TIME = 60 * 60 * 12\nconst.ONE_WEEK = timedelta(days=7)\nconst.JST_UTC_TIMEDELTA = timedelta(hours=9)\nconst.FLEX = {'type': 'flex', 'altText': 'Flex Message', 'contents': {\n 'type': 'bubble', 'hero': {'type': 'image', 'url':\n 'https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901'\n , 'size': 'full', 'aspectRatio': '1:1', 'aspectMode': 'cover', 'action':\n {'type': 'uri', 'label': 'UseCase Cafe', 'uri': 'https://line.me/ja/'}},\n 'body': {'type': 'box', 'layout': 'vertical', 'contents': [{'type':\n 'text', 'text': 'LINE Cafe', 'size': 'xl', 'weight': 'bold'}, {'type':\n 'box', 'layout': 'baseline', 'margin': 'md', 'contents': [{'type':\n 'icon', 
'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png'\n , 'size': 'sm'}, {'type': 'text', 'text': '4.0', 'flex': 0, 'margin':\n 'md', 'size': 'sm', 'color': '#999999'}]}, {'type': 'box', 'layout':\n 'vertical', 'spacing': 'sm', 'margin': 'lg', 'contents': [{'type':\n 'box', 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type':\n 'text', 'text': 'Place', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'},\n {'type': 'text', 'text': 'Miraina Tower, 4-1-6 Shinjuku, Tokyo', 'flex':\n 5, 'size': 'sm', 'color': '#666666', 'wrap': True}]}, {'type': 'box',\n 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type': 'text',\n 'text': 'Time', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'}, {'type':\n 'text', 'text': '10:00 - 23:00', 'flex': 5, 'size': 'sm', 'color':\n '#666666', 'wrap': True}]}]}]}, 'footer': {'type': 'box', 'layout':\n 'vertical', 'flex': 0, 'spacing': 'sm', 'contents': [{'type': 'button',\n 'action': {'type': 'uri', 'label': 'WEBサイト', 'uri':\n 'https://line.me/ja/'}, 'height': 'sm', 'style': 'link'}, {'type':\n 'button', 'action': {'type': 'datetimepicker', 'label': '予約', 'data':\n 'action=reserve', 'mode': 'datetime', 'initial': '2020-01-01t00:00',\n 'max': '2020-12-31t23:59', 'min': '2020-01-01t00:00'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'button', 'action': {'type': 'postback',\n 'label': 'クイックアクション', 'data': 'action=quick_reply'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'spacer', 'size': 
'sm'}]}}}\nconst.CAROUSEL = TemplateSendMessage(alt_text='Carousel template', template\n =CarouselTemplate(columns=[CarouselColumn(thumbnail_image_url=\n 'https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186'\n , title='最大80%OFF', text='期間限定SALE', actions=[MessageAction(label=\n 'Go to SALE', text='Choose SALE')]), CarouselColumn(thumbnail_image_url\n =\n 'https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654'\n , title='今月のおススメ商品', text='これがあれば困らない!', actions=[MessageAction(label=\n 'Recommended', text='Choose Recommended')]), CarouselColumn(\n thumbnail_image_url=\n 'https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694'\n , title='スッキリ収納特集', text='大切なお洋服をスッキリ簡単に収納します', actions=[MessageAction(\n label='To receive clothes', text='Choose receive clothes')])]))\nconst.QUICK_REPLY_ITEMS = [QuickReplyButton(action=LocationAction(label=\n '位置情報')), QuickReplyButton(action=CameraAction(label='カメラ起動')),\n QuickReplyButton(action=CameraRollAction(label='カメラロール起動'))]\nconst.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),\n 'carousel': os.getenv('RICH_MENU_CAROUSEL', None), 'flex': os.getenv(\n 'RICH_MENU_FLEX', None)}\n", "step-4": "\"\"\"\nConstant types in Python.\n定数上書きチェック用\n\"\"\"\nimport os\nfrom common import const\nfrom datetime import timedelta\n\nfrom linebot.models import (\n TemplateSendMessage, CarouselTemplate, CarouselColumn, MessageAction,\n QuickReplyButton, CameraAction, CameraRollAction, LocationAction\n)\n\nconst.API_PROFILE_URL = 'https://api.line.me/v2/profile'\nconst.API_NOTIFICATIONTOKEN_URL = 'https://api.line.me/message/v3/notifier/token' # noqa: E501\nconst.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'\nconst.API_SENDSERVICEMESSAGE_URL = 'https://api.line.me/message/v3/notifier/send?target=service' # noqa 501\nconst.API_USER_ID_URL = 
'https://api.line.me/oauth2/v2.1/verify'\n\nconst.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'\nconst.DATA_LIMIT_TIME = 60 * 60 * 12\nconst.ONE_WEEK = timedelta(days=7)\nconst.JST_UTC_TIMEDELTA = timedelta(hours=9)\n\n\nconst.FLEX = {\n \"type\": \"flex\",\n \"altText\": \"Flex Message\",\n \"contents\": {\n \"type\": \"bubble\",\n \"hero\": {\n \"type\": \"image\",\n \"url\": \"https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901\", # noqa:E501\n \"size\": \"full\",\n \"aspectRatio\": \"1:1\",\n \"aspectMode\": \"cover\",\n \"action\": {\n \"type\": \"uri\",\n \"label\": \"UseCase Cafe\",\n \"uri\": \"https://line.me/ja/\"\n }\n },\n \"body\": {\n \"type\": \"box\",\n \"layout\": \"vertical\",\n \"contents\": [\n {\n \"type\": \"text\",\n \"text\": \"LINE Cafe\",\n \"size\": \"xl\",\n \"weight\": \"bold\"\n },\n {\n \"type\": \"box\",\n \"layout\": \"baseline\",\n \"margin\": \"md\",\n \"contents\": [\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"text\",\n \"text\": \"4.0\",\n \"flex\": 0,\n \"margin\": \"md\",\n \"size\": \"sm\",\n \"color\": \"#999999\"\n }\n ]\n },\n {\n \"type\": \"box\",\n \"layout\": \"vertical\",\n \"spacing\": \"sm\",\n \"margin\": \"lg\",\n \"contents\": [\n {\n \"type\": \"box\",\n 
\"layout\": \"baseline\",\n \"spacing\": \"sm\",\n \"contents\": [\n {\n \"type\": \"text\",\n \"text\": \"Place\",\n \"flex\": 1,\n \"size\": \"sm\",\n \"color\": \"#AAAAAA\"\n },\n {\n \"type\": \"text\",\n \"text\": \"Miraina Tower, 4-1-6 Shinjuku, Tokyo\", # noqa:E501\n \"flex\": 5,\n \"size\": \"sm\",\n \"color\": \"#666666\",\n \"wrap\": True\n }\n ]\n },\n {\n \"type\": \"box\",\n \"layout\": \"baseline\",\n \"spacing\": \"sm\",\n \"contents\": [\n {\n \"type\": \"text\",\n \"text\": \"Time\",\n \"flex\": 1,\n \"size\": \"sm\",\n \"color\": \"#AAAAAA\"\n },\n {\n \"type\": \"text\",\n \"text\": \"10:00 - 23:00\",\n \"flex\": 5,\n \"size\": \"sm\",\n \"color\": \"#666666\",\n \"wrap\": True\n }\n ]\n }\n ]\n }\n ]\n },\n \"footer\": {\n \"type\": \"box\",\n \"layout\": \"vertical\",\n \"flex\": 0,\n \"spacing\": \"sm\",\n \"contents\": [\n {\n \"type\": \"button\",\n \"action\": {\n \"type\": \"uri\",\n \"label\": \"WEBサイト\",\n \"uri\": \"https://line.me/ja/\"\n },\n \"height\": \"sm\",\n \"style\": \"link\"\n },\n {\n \"type\": \"button\",\n \"action\": {\n \"type\": \"datetimepicker\",\n \"label\": \"予約\",\n \"data\": \"action=reserve\",\n \"mode\": \"datetime\",\n \"initial\": \"2020-01-01t00:00\",\n \"max\": \"2020-12-31t23:59\",\n \"min\": \"2020-01-01t00:00\"\n },\n \"height\": \"sm\",\n \"style\": \"link\"\n },\n {\n \"type\": \"button\",\n \"action\": {\n \"type\": \"postback\",\n \"label\": \"クイックアクション\",\n \"data\": \"action=quick_reply\",\n },\n \"height\": \"sm\",\n \"style\": \"link\"\n },\n {\n \"type\": \"spacer\",\n \"size\": \"sm\"\n }\n ]\n }\n }\n}\n\nconst.CAROUSEL = TemplateSendMessage(\n alt_text='Carousel template',\n template=CarouselTemplate(\n columns=[\n CarouselColumn(\n thumbnail_image_url='https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186', # noqa:E501\n title='最大80%OFF',\n text='期間限定SALE',\n actions=[\n MessageAction(\n label='Go to SALE',\n text='Choose 
SALE'\n )\n ]\n ),\n CarouselColumn(\n thumbnail_image_url='https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654', # noqa:E501\n title='今月のおススメ商品',\n text='これがあれば困らない!',\n actions=[\n MessageAction(\n label='Recommended',\n text='Choose Recommended'\n )\n ]\n ),\n CarouselColumn(\n thumbnail_image_url='https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694', # noqa:E501\n title='スッキリ収納特集',\n text='大切なお洋服をスッキリ簡単に収納します',\n actions=[\n MessageAction(\n label='To receive clothes',\n text='Choose receive clothes'\n )\n ]\n )\n ]\n )\n)\n\nconst.QUICK_REPLY_ITEMS = [\n QuickReplyButton(action=LocationAction(label='位置情報')),\n QuickReplyButton(action=CameraAction(label='カメラ起動')),\n QuickReplyButton(action=CameraRollAction(label='カメラロール起動')),\n]\n\nconst.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),\n 'carousel': os.getenv('RICH_MENU_CAROUSEL', None),\n 'flex': os.getenv('RICH_MENU_FLEX', None)\n }\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Generated by Django 3.0.7 on 2020-06-15 15:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0003_auto_20200615_1225'), ] operations = [ migrations.AlterField( model_name='product', name='harmonizacao', field=models.TextField(null=True), ), migrations.AlterField( model_name='product', name='history', field=models.TextField(null=True), ), migrations.AlterField( model_name='product', name='premios', field=models.TextField(null=True), ), ]
normal
{ "blob_id": "c382b298cce8d7045d6ce8a84f90b3800dba7717", "index": 297, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('products', '0003_auto_20200615_1225')]\n operations = [migrations.AlterField(model_name='product', name=\n 'harmonizacao', field=models.TextField(null=True)), migrations.\n AlterField(model_name='product', name='history', field=models.\n TextField(null=True)), migrations.AlterField(model_name='product',\n name='premios', field=models.TextField(null=True))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('products', '0003_auto_20200615_1225')]\n operations = [migrations.AlterField(model_name='product', name=\n 'harmonizacao', field=models.TextField(null=True)), migrations.\n AlterField(model_name='product', name='history', field=models.\n TextField(null=True)), migrations.AlterField(model_name='product',\n name='premios', field=models.TextField(null=True))]\n", "step-5": "# Generated by Django 3.0.7 on 2020-06-15 15:26\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0003_auto_20200615_1225'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='product',\n name='harmonizacao',\n field=models.TextField(null=True),\n ),\n migrations.AlterField(\n model_name='product',\n name='history',\n field=models.TextField(null=True),\n ),\n migrations.AlterField(\n model_name='product',\n name='premios',\n field=models.TextField(null=True),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from binaryninja import * import yara def get_yara_rule_path(): return get_open_filename_input("Open YARA rule", "YARA rules (*.yar *.yara)") def get_markdown_result(matches): entry_fmt = "| {} | {} | {} |\n" md_text = """# YARA - Scan results | Rule Name | Function | Strings offsets | |-----------|----------|-----------------| """ for m in matches: rule = m['rule'] func = '-' if 'funcs' in m and len(m['funcs']) > 0: func = " ".join(['[{:name}](binaryninja://?expr={:name})'.format(name=f.name) for f in m['funcs']]) # 'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')] s = " ".join(['["{}"](binaryninja://?expr=0x{:x})'.format(s[2].decode('utf-8'), s[0]) for s in m['strings']]) md_text += entry_fmt.format(rule, func, s) return md_text def plugin_search_file(bv): matches = [] def yara_callback(data): """ { 'tags': ['foo', 'bar'], 'matches': True, 'namespace': 'default', 'rule': 'my_rule', 'meta': {}, 'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')] } """ if data['matches']: funcs = [] for addr, _, _ in data['strings']: funcs += bv.get_functions_containing(addr) data['funcs'] = funcs matches.append(data) yara.CALLBACK_CONTINUE yara_path = get_yara_rule_path() # user closed message prompt if yara_path is None: return try: rules = yara.compile(filepath=yara_path.decode('utf-8')) rules.match(bv.file.original_filename, callback=yara_callback) except Exception as e: log_error("[YARA] Exception: {}".format(str(e))) show_message_box("Error", "Check logs for details", icon=MessageBoxIcon.ErrorIcon) if len(matches) > 0: bv.show_markdown_report("YARA", get_markdown_result(matches)) else: log_info("[YARA] No matches") def plugin_search_functions(bv): show_message_box("Not implemented", "This feature is not implemented yet") # TODO implement Background task maybe? 
PluginCommand.register("[YARA] Scan file with yara rule...", "Scan file with yara rule", plugin_search_file) # PluginCommand.register('[YARA] Scan functions with yara rule...', "Scan all functions with yara rules (might be slower)", plugin_search_functions)
normal
{ "blob_id": "56d4532b633242f34f7a6ed86a35290836861f67", "index": 4201, "step-1": "<mask token>\n\n\ndef get_markdown_result(matches):\n entry_fmt = '| {} | {} | {} |\\n'\n md_text = \"\"\"# YARA - Scan results\n\n| Rule Name | Function | Strings offsets |\n|-----------|----------|-----------------|\n\"\"\"\n for m in matches:\n rule = m['rule']\n func = '-'\n if 'funcs' in m and len(m['funcs']) > 0:\n func = ' '.join(['[{:name}](binaryninja://?expr={:name})'.\n format(name=f.name) for f in m['funcs']])\n s = ' '.join(['[\"{}\"](binaryninja://?expr=0x{:x})'.format(s[2].\n decode('utf-8'), s[0]) for s in m['strings']])\n md_text += entry_fmt.format(rule, func, s)\n return md_text\n\n\ndef plugin_search_file(bv):\n matches = []\n\n def yara_callback(data):\n \"\"\"\n\t\t\t{\n\t\t\t'tags': ['foo', 'bar'],\n\t\t\t'matches': True,\n\t\t\t'namespace': 'default',\n\t\t\t'rule': 'my_rule',\n\t\t\t'meta': {},\n\t\t\t'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')]\n\t\t\t}\n\t\t\"\"\"\n if data['matches']:\n funcs = []\n for addr, _, _ in data['strings']:\n funcs += bv.get_functions_containing(addr)\n data['funcs'] = funcs\n matches.append(data)\n yara.CALLBACK_CONTINUE\n yara_path = get_yara_rule_path()\n if yara_path is None:\n return\n try:\n rules = yara.compile(filepath=yara_path.decode('utf-8'))\n rules.match(bv.file.original_filename, callback=yara_callback)\n except Exception as e:\n log_error('[YARA] Exception: {}'.format(str(e)))\n show_message_box('Error', 'Check logs for details', icon=\n MessageBoxIcon.ErrorIcon)\n if len(matches) > 0:\n bv.show_markdown_report('YARA', get_markdown_result(matches))\n else:\n log_info('[YARA] No matches')\n\n\ndef plugin_search_functions(bv):\n show_message_box('Not implemented', 'This feature is not implemented yet')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_yara_rule_path():\n return get_open_filename_input('Open YARA rule',\n 'YARA rules (*.yar *.yara)')\n\n\ndef get_markdown_result(matches):\n entry_fmt 
= '| {} | {} | {} |\\n'\n md_text = \"\"\"# YARA - Scan results\n\n| Rule Name | Function | Strings offsets |\n|-----------|----------|-----------------|\n\"\"\"\n for m in matches:\n rule = m['rule']\n func = '-'\n if 'funcs' in m and len(m['funcs']) > 0:\n func = ' '.join(['[{:name}](binaryninja://?expr={:name})'.\n format(name=f.name) for f in m['funcs']])\n s = ' '.join(['[\"{}\"](binaryninja://?expr=0x{:x})'.format(s[2].\n decode('utf-8'), s[0]) for s in m['strings']])\n md_text += entry_fmt.format(rule, func, s)\n return md_text\n\n\ndef plugin_search_file(bv):\n matches = []\n\n def yara_callback(data):\n \"\"\"\n\t\t\t{\n\t\t\t'tags': ['foo', 'bar'],\n\t\t\t'matches': True,\n\t\t\t'namespace': 'default',\n\t\t\t'rule': 'my_rule',\n\t\t\t'meta': {},\n\t\t\t'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')]\n\t\t\t}\n\t\t\"\"\"\n if data['matches']:\n funcs = []\n for addr, _, _ in data['strings']:\n funcs += bv.get_functions_containing(addr)\n data['funcs'] = funcs\n matches.append(data)\n yara.CALLBACK_CONTINUE\n yara_path = get_yara_rule_path()\n if yara_path is None:\n return\n try:\n rules = yara.compile(filepath=yara_path.decode('utf-8'))\n rules.match(bv.file.original_filename, callback=yara_callback)\n except Exception as e:\n log_error('[YARA] Exception: {}'.format(str(e)))\n show_message_box('Error', 'Check logs for details', icon=\n MessageBoxIcon.ErrorIcon)\n if len(matches) > 0:\n bv.show_markdown_report('YARA', get_markdown_result(matches))\n else:\n log_info('[YARA] No matches')\n\n\ndef plugin_search_functions(bv):\n show_message_box('Not implemented', 'This feature is not implemented yet')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_yara_rule_path():\n return get_open_filename_input('Open YARA rule',\n 'YARA rules (*.yar *.yara)')\n\n\ndef get_markdown_result(matches):\n entry_fmt = '| {} | {} | {} |\\n'\n md_text = \"\"\"# YARA - Scan results\n\n| Rule Name | Function | Strings offsets 
|\n|-----------|----------|-----------------|\n\"\"\"\n for m in matches:\n rule = m['rule']\n func = '-'\n if 'funcs' in m and len(m['funcs']) > 0:\n func = ' '.join(['[{:name}](binaryninja://?expr={:name})'.\n format(name=f.name) for f in m['funcs']])\n s = ' '.join(['[\"{}\"](binaryninja://?expr=0x{:x})'.format(s[2].\n decode('utf-8'), s[0]) for s in m['strings']])\n md_text += entry_fmt.format(rule, func, s)\n return md_text\n\n\ndef plugin_search_file(bv):\n matches = []\n\n def yara_callback(data):\n \"\"\"\n\t\t\t{\n\t\t\t'tags': ['foo', 'bar'],\n\t\t\t'matches': True,\n\t\t\t'namespace': 'default',\n\t\t\t'rule': 'my_rule',\n\t\t\t'meta': {},\n\t\t\t'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')]\n\t\t\t}\n\t\t\"\"\"\n if data['matches']:\n funcs = []\n for addr, _, _ in data['strings']:\n funcs += bv.get_functions_containing(addr)\n data['funcs'] = funcs\n matches.append(data)\n yara.CALLBACK_CONTINUE\n yara_path = get_yara_rule_path()\n if yara_path is None:\n return\n try:\n rules = yara.compile(filepath=yara_path.decode('utf-8'))\n rules.match(bv.file.original_filename, callback=yara_callback)\n except Exception as e:\n log_error('[YARA] Exception: {}'.format(str(e)))\n show_message_box('Error', 'Check logs for details', icon=\n MessageBoxIcon.ErrorIcon)\n if len(matches) > 0:\n bv.show_markdown_report('YARA', get_markdown_result(matches))\n else:\n log_info('[YARA] No matches')\n\n\ndef plugin_search_functions(bv):\n show_message_box('Not implemented', 'This feature is not implemented yet')\n\n\nPluginCommand.register('[YARA] Scan file with yara rule...',\n 'Scan file with yara rule', plugin_search_file)\n", "step-4": "from binaryninja import *\nimport yara\n\n\ndef get_yara_rule_path():\n return get_open_filename_input('Open YARA rule',\n 'YARA rules (*.yar *.yara)')\n\n\ndef get_markdown_result(matches):\n entry_fmt = '| {} | {} | {} |\\n'\n md_text = \"\"\"# YARA - Scan results\n\n| Rule Name | Function | Strings offsets 
|\n|-----------|----------|-----------------|\n\"\"\"\n for m in matches:\n rule = m['rule']\n func = '-'\n if 'funcs' in m and len(m['funcs']) > 0:\n func = ' '.join(['[{:name}](binaryninja://?expr={:name})'.\n format(name=f.name) for f in m['funcs']])\n s = ' '.join(['[\"{}\"](binaryninja://?expr=0x{:x})'.format(s[2].\n decode('utf-8'), s[0]) for s in m['strings']])\n md_text += entry_fmt.format(rule, func, s)\n return md_text\n\n\ndef plugin_search_file(bv):\n matches = []\n\n def yara_callback(data):\n \"\"\"\n\t\t\t{\n\t\t\t'tags': ['foo', 'bar'],\n\t\t\t'matches': True,\n\t\t\t'namespace': 'default',\n\t\t\t'rule': 'my_rule',\n\t\t\t'meta': {},\n\t\t\t'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')]\n\t\t\t}\n\t\t\"\"\"\n if data['matches']:\n funcs = []\n for addr, _, _ in data['strings']:\n funcs += bv.get_functions_containing(addr)\n data['funcs'] = funcs\n matches.append(data)\n yara.CALLBACK_CONTINUE\n yara_path = get_yara_rule_path()\n if yara_path is None:\n return\n try:\n rules = yara.compile(filepath=yara_path.decode('utf-8'))\n rules.match(bv.file.original_filename, callback=yara_callback)\n except Exception as e:\n log_error('[YARA] Exception: {}'.format(str(e)))\n show_message_box('Error', 'Check logs for details', icon=\n MessageBoxIcon.ErrorIcon)\n if len(matches) > 0:\n bv.show_markdown_report('YARA', get_markdown_result(matches))\n else:\n log_info('[YARA] No matches')\n\n\ndef plugin_search_functions(bv):\n show_message_box('Not implemented', 'This feature is not implemented yet')\n\n\nPluginCommand.register('[YARA] Scan file with yara rule...',\n 'Scan file with yara rule', plugin_search_file)\n", "step-5": "from binaryninja import *\nimport yara\n\ndef get_yara_rule_path():\n\treturn get_open_filename_input(\"Open YARA rule\", \"YARA rules (*.yar *.yara)\")\n\ndef get_markdown_result(matches):\n\tentry_fmt = \"| {} | {} | {} |\\n\"\n\tmd_text = \"\"\"# YARA - Scan results\n\n| Rule Name | Function | Strings offsets 
|\n|-----------|----------|-----------------|\n\"\"\"\n\tfor m in matches:\n\t\trule = m['rule']\n\t\tfunc = '-'\n\t\tif 'funcs' in m and len(m['funcs']) > 0:\n\t\t\tfunc = \" \".join(['[{:name}](binaryninja://?expr={:name})'.format(name=f.name) for f in m['funcs']])\n\t\t\n\t\t# 'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')]\n\t\ts = \" \".join(['[\"{}\"](binaryninja://?expr=0x{:x})'.format(s[2].decode('utf-8'), s[0]) for s in m['strings']])\n\t\tmd_text += entry_fmt.format(rule, func, s)\n\treturn md_text\n\ndef plugin_search_file(bv):\n\tmatches = []\n\t\n\tdef yara_callback(data):\n\t\t\"\"\"\n\t\t\t{\n\t\t\t'tags': ['foo', 'bar'],\n\t\t\t'matches': True,\n\t\t\t'namespace': 'default',\n\t\t\t'rule': 'my_rule',\n\t\t\t'meta': {},\n\t\t\t'strings': [(81L, '$a', 'abc'), (141L, '$b', 'def')]\n\t\t\t}\n\t\t\"\"\"\n\t\tif data['matches']:\n\t\t\tfuncs = []\n\t\t\tfor addr, _, _ in data['strings']:\n\t\t\t\tfuncs += bv.get_functions_containing(addr)\n\t\t\tdata['funcs'] = funcs\n\t\t\tmatches.append(data)\n\n\t\tyara.CALLBACK_CONTINUE\n\n\tyara_path = get_yara_rule_path()\n\t\n\t# user closed message prompt\n\tif yara_path is None:\n\t\treturn\n\n\ttry:\n\t\trules = yara.compile(filepath=yara_path.decode('utf-8'))\n\t\trules.match(bv.file.original_filename, callback=yara_callback)\n\n\texcept Exception as e:\n\t\tlog_error(\"[YARA] Exception: {}\".format(str(e)))\n\t\tshow_message_box(\"Error\", \"Check logs for details\", icon=MessageBoxIcon.ErrorIcon)\n\n\tif len(matches) > 0:\n\t\tbv.show_markdown_report(\"YARA\", get_markdown_result(matches))\n\telse:\n\t\tlog_info(\"[YARA] No matches\")\n\ndef plugin_search_functions(bv):\n\tshow_message_box(\"Not implemented\", \"This feature is not implemented yet\")\n\t # TODO implement Background task maybe?\n\nPluginCommand.register(\"[YARA] Scan file with yara rule...\", \"Scan file with yara rule\", plugin_search_file)\n# PluginCommand.register('[YARA] Scan functions with yara rule...', \"Scan all functions with 
yara rules (might be slower)\", plugin_search_functions)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
import os from pathlib import Path DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("PLOTTER_ROOT", "~/.plotter/mainnet"))).resolve()
normal
{ "blob_id": "3a8164299fa51b7d781f2b80d77cfba05b5f6915", "index": 4157, "step-1": "<mask token>\n", "step-2": "<mask token>\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',\n '~/.plotter/mainnet'))).resolve()\n", "step-3": "import os\nfrom pathlib import Path\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv('PLOTTER_ROOT',\n '~/.plotter/mainnet'))).resolve()\n", "step-4": "import os\nfrom pathlib import Path\n\nDEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv(\"PLOTTER_ROOT\", \"~/.plotter/mainnet\"))).resolve()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
list1 = [('北京大洋路', '红蛋', '散框批发', '120-125', '44', '落', '8车'), ('北京回龙观', '红蛋', '散框批发', '124', '44', '落', ''), ('北京石门', '红蛋', '散框批发', '124', '44', '落', '')] mysql_data = [] import numpy as np for l in list1: array = np.array(l) tolist = array.tolist() tolist.insert(0, 'ppp') tolist.append('lll') mysql_data.append(tolist) print(mysql_data) import requests headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' } get = requests.get('http://www.baidu.com', headers=headers) print(get.text)
normal
{ "blob_id": "896d836ede533bad24f4077e5ba964105d96bf7a", "index": 9485, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor l in list1:\n array = np.array(l)\n tolist = array.tolist()\n tolist.insert(0, 'ppp')\n tolist.append('lll')\n mysql_data.append(tolist)\nprint(mysql_data)\n<mask token>\nprint(get.text)\n", "step-3": "list1 = [('北京大洋路', '红蛋', '散框批发', '120-125', '44', '落', '8车'), ('北京回龙观',\n '红蛋', '散框批发', '124', '44', '落', ''), ('北京石门', '红蛋', '散框批发', '124', '44',\n '落', '')]\nmysql_data = []\n<mask token>\nfor l in list1:\n array = np.array(l)\n tolist = array.tolist()\n tolist.insert(0, 'ppp')\n tolist.append('lll')\n mysql_data.append(tolist)\nprint(mysql_data)\n<mask token>\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'\n }\nget = requests.get('http://www.baidu.com', headers=headers)\nprint(get.text)\n", "step-4": "list1 = [('北京大洋路', '红蛋', '散框批发', '120-125', '44', '落', '8车'), ('北京回龙观',\n '红蛋', '散框批发', '124', '44', '落', ''), ('北京石门', '红蛋', '散框批发', '124', '44',\n '落', '')]\nmysql_data = []\nimport numpy as np\nfor l in list1:\n array = np.array(l)\n tolist = array.tolist()\n tolist.insert(0, 'ppp')\n tolist.append('lll')\n mysql_data.append(tolist)\nprint(mysql_data)\nimport requests\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'\n }\nget = requests.get('http://www.baidu.com', headers=headers)\nprint(get.text)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
source = open("input.txt", "r") total = 0 def calculateWeight( weight ): fuel = calculateFuel(weight) if fuel > 0: sum = fuel + calculateWeight(fuel) return sum else: return max(0, fuel) def calculateFuel ( weight ): return weight // 3 -2 for line in source.readlines(): total += calculateWeight(int(line)) print(total)
normal
{ "blob_id": "bea1a5bc9c92d095a2f187a4c06d18d0a939f233", "index": 3376, "step-1": "<mask token>\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\nprint(total)\n", "step-4": "source = open('input.txt', 'r')\ntotal = 0\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\nprint(total)\n", "step-5": "source = open(\"input.txt\", \"r\")\ntotal = 0\n\ndef calculateWeight( weight ):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel ( weight ):\n return weight // 3 -2\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\n\nprint(total)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import os import sqlite3 import operator from collections import OrderedDict import matplotlib.pyplot as plt def parse(url): try: parsed_url_components = url.split('//') sublevel_split = parsed_url_components[1].split('/', 1) domain = sublevel_split[0].replace("www.", "") return domain except IndexError: print("URL format error!") def analyze(results): prompt = input("[.] Type <c> to print or <p> to plot\n[>] ") if prompt == "c": for site, count in list(sites_count_sorted.items()): print(site, count) elif prompt == "p": plt.bar(list(range(len(results))), list(results.values()), align='edge') plt.xticks(rotation=45) plt.xticks(list(range(len(results))), list(results.keys())) plt.show() else: print("[.] Uh?") quit()
normal
{ "blob_id": "c74fc99bf8582fd83c312f27dfffbe894a2c8c1b", "index": 3431, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\ndef analyze(results):\n prompt = input('[.] Type <c> to print or <p> to plot\\n[>] ')\n if prompt == 'c':\n for site, count in list(sites_count_sorted.items()):\n print(site, count)\n elif prompt == 'p':\n plt.bar(list(range(len(results))), list(results.values()), align='edge'\n )\n plt.xticks(rotation=45)\n plt.xticks(list(range(len(results))), list(results.keys()))\n plt.show()\n else:\n print('[.] Uh?')\n quit()\n", "step-4": "import os\nimport sqlite3\nimport operator\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\ndef analyze(results):\n prompt = input('[.] Type <c> to print or <p> to plot\\n[>] ')\n if prompt == 'c':\n for site, count in list(sites_count_sorted.items()):\n print(site, count)\n elif prompt == 'p':\n plt.bar(list(range(len(results))), list(results.values()), align='edge'\n )\n plt.xticks(rotation=45)\n plt.xticks(list(range(len(results))), list(results.keys()))\n plt.show()\n else:\n print('[.] 
Uh?')\n quit()\n", "step-5": "import os\nimport sqlite3\nimport operator\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\ndef parse(url):\n\ttry:\n\t\tparsed_url_components = url.split('//')\n\t\tsublevel_split = parsed_url_components[1].split('/', 1)\n\t\tdomain = sublevel_split[0].replace(\"www.\", \"\")\n\t\treturn domain\n\texcept IndexError:\n\t\tprint(\"URL format error!\")\n\ndef analyze(results):\n\n\tprompt = input(\"[.] Type <c> to print or <p> to plot\\n[>] \")\n\n\tif prompt == \"c\":\n\t\tfor site, count in list(sites_count_sorted.items()):\n\t\t\tprint(site, count)\n\telif prompt == \"p\":\n\t\tplt.bar(list(range(len(results))), list(results.values()), align='edge')\n\t\tplt.xticks(rotation=45)\n\t\tplt.xticks(list(range(len(results))), list(results.keys()))\n\t\tplt.show()\n\telse:\n\t\tprint(\"[.] Uh?\")\n\t\tquit()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python3 # -*- coding: utf-8 -*- import random a = random.sample(range(100), 10) print("All items: {}".format(a)) it = iter(a) # call a.__iter__() print("Num01: {}".format(next(it))) # call it.__next__() print("Num02: {}".format(next(it))) print("Num03: {}".format(it.__next__())) it = iter(a) i = 1 while True: try: x = next(it) print("Num{:02d}: {}".format(i, x)) except StopIteration: break i += 1 class Node(): def __init__(self, value): self._value = value self._children = [] def __repr__(self): return 'Node({!r})'.format(self._value) def add_child(self, node): self._children.append(node) def __iter__(self): return iter(self._children) root = Node(0) root.add_child(Node(1)) root.add_child(Node(2)) for x in root: print(x) class Node2(): def __init__(self, value): self._value = value self._children = [] self._idx = 0 def __repr__(self): return 'Node2({!r})'.format(self._value) def add_child(self, node): self._children.append(node) def __iter__(self): self._idx = 0 return self # 返回自己, 说明自己是迭代器,须实现__next__() def __next__(self): if self._idx < len(self._children): idx = self._idx self._idx += 1 return self._children[idx] raise StopIteration root = Node2(10) root.add_child(Node2(11)) root.add_child(Node2(22)) for x in root: print(x) class Node3(): def __init__(self, value): self._value = value self._children = [] self._idx = 0 def __repr__(self): return 'Node3({!r})'.format(self._value) def add_child(self, node): self._children.append(node) def has_children(self): return len(self._children) != 0 def __iter__(self): self._idx = 0 return self # 返回自己, 说明自己是迭代器,须实现__next__() def __next__(self): if self._idx < len(self._children): idx = self._idx self._idx += 1 return self._children[idx] raise StopIteration def recur_show(root): print(root) if root.has_children(): for node in root: recur_show(node) def recur_show2(root): if root.has_children(): for node in root: recur_show2(node) print(root) # 0 # # 10 20 30 # # 11 12 31 root = Node3(0) c1 = Node3(10) c2 = 
Node3(20) c3 = Node3(30) c11 = Node3(11) c12 = Node3(12) c31 = Node3(31) root.add_child(c1) root.add_child(c2) root.add_child(c3) c1.add_child(c11) c1.add_child(c12) c3.add_child(c31) print("==================") recur_show(root) print("==================") recur_show2(root)
normal
{ "blob_id": "f5513bea4ca5f4c2ac80c4bf537a264a4052d1e9", "index": 8866, "step-1": "<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n <mask token>\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise 
StopIteration\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n\n def __repr__(self):\n return 'Node({!r})'.format(self._value)\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def 
__repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n", "step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport random\n\na = random.sample(range(100), 10)\nprint(\"All items: {}\".format(a))\n\nit = iter(a) # call a.__iter__()\n\nprint(\"Num01: {}\".format(next(it))) # call it.__next__()\nprint(\"Num02: {}\".format(next(it)))\nprint(\"Num03: {}\".format(it.__next__()))\n\nit = iter(a)\ni = 1\nwhile True:\n try:\n x = next(it)\n print(\"Num{:02d}: {}\".format(i, x))\n except StopIteration:\n break\n i += 1\n\n\nclass Node():\n def __init__(self, value):\n self._value = value\n self._children = []\n\n def __repr__(self):\n return 'Node({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n return iter(self._children)\n \nroot = Node(0)\nroot.add_child(Node(1))\nroot.add_child(Node(2))\n\nfor x in root:\n print(x)\n\nclass Node2():\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self # 返回自己, 说明自己是迭代器,须实现__next__()\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\nroot = Node2(10)\nroot.add_child(Node2(11))\nroot.add_child(Node2(22))\n\nfor x in root:\n print(x)\n\nclass Node3():\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def 
add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self # 返回自己, 说明自己是迭代器,须实现__next__()\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\ndef recur_show(root):\n print(root)\n if root.has_children():\n for node in root:\n recur_show(node)\n\ndef recur_show2(root):\n if root.has_children():\n for node in root:\n recur_show2(node)\n print(root)\n\n# 0\n# \n# 10 20 30\n# \n# 11 12 31\n\nroot = Node3(0)\nc1 = Node3(10)\nc2 = Node3(20)\nc3 = Node3(30)\nc11 = Node3(11)\nc12 = Node3(12)\nc31 = Node3(31)\nroot.add_child(c1)\nroot.add_child(c2)\nroot.add_child(c3)\nc1.add_child(c11)\nc1.add_child(c12)\nc3.add_child(c31)\n\nprint(\"==================\")\nrecur_show(root)\nprint(\"==================\")\nrecur_show2(root)\n", "step-ids": [ 12, 13, 15, 16, 24 ] }
[ 12, 13, 15, 16, 24 ]
from PyInstaller.utils.hooks import collect_data_files hiddenimports = ['sklearn.utils.sparsetools._graph_validation', 'sklearn.utils.sparsetools._graph_tools', 'sklearn.utils.lgamma', 'sklearn.utils.weight_vector'] datas = collect_data_files('sklearn')
normal
{ "blob_id": "12396130dc52866cc54d6dc701cf0f9a41a168b6", "index": 8351, "step-1": "<mask token>\n", "step-2": "<mask token>\nhiddenimports = ['sklearn.utils.sparsetools._graph_validation',\n 'sklearn.utils.sparsetools._graph_tools', 'sklearn.utils.lgamma',\n 'sklearn.utils.weight_vector']\ndatas = collect_data_files('sklearn')\n", "step-3": "from PyInstaller.utils.hooks import collect_data_files\nhiddenimports = ['sklearn.utils.sparsetools._graph_validation',\n 'sklearn.utils.sparsetools._graph_tools', 'sklearn.utils.lgamma',\n 'sklearn.utils.weight_vector']\ndatas = collect_data_files('sklearn')\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import versatileimagefield.fields class Migration(migrations.Migration): dependencies = [ ('venue', '0001_initial'), ] operations = [ migrations.CreateModel( name='Images', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('image', versatileimagefield.fields.VersatileImageField(upload_to=b'images', verbose_name=b'Image')), ('created_at', models.DateTimeField(help_text=b'Date when category created.', verbose_name=b'Created At', auto_now_add=True)), ('updated_at', models.DateTimeField(help_text=b'Date when category updated.', verbose_name=b'Updated At', auto_now=True)), ('category', models.ForeignKey(related_name='images', blank=True, to='venue.Category', null=True)), ], ), ]
normal
{ "blob_id": "09bf7460b2c928bf6e1346d9d1e2e1276540c080", "index": 3099, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('venue', '0001_initial')]\n operations = [migrations.CreateModel(name='Images', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('image', versatileimagefield.fields.\n VersatileImageField(upload_to=b'images', verbose_name=b'Image')), (\n 'created_at', models.DateTimeField(help_text=\n b'Date when category created.', verbose_name=b'Created At',\n auto_now_add=True)), ('updated_at', models.DateTimeField(help_text=\n b'Date when category updated.', verbose_name=b'Updated At',\n auto_now=True)), ('category', models.ForeignKey(related_name=\n 'images', blank=True, to='venue.Category', null=True))])]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\nimport versatileimagefield.fields\n\n\nclass Migration(migrations.Migration):\n dependencies = [('venue', '0001_initial')]\n operations = [migrations.CreateModel(name='Images', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('image', versatileimagefield.fields.\n VersatileImageField(upload_to=b'images', verbose_name=b'Image')), (\n 'created_at', models.DateTimeField(help_text=\n b'Date when category created.', verbose_name=b'Created At',\n auto_now_add=True)), ('updated_at', models.DateTimeField(help_text=\n b'Date when category updated.', verbose_name=b'Updated At',\n auto_now=True)), ('category', models.ForeignKey(related_name=\n 'images', blank=True, to='venue.Category', null=True))])]\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport versatileimagefield.fields\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('venue', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Images',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', versatileimagefield.fields.VersatileImageField(upload_to=b'images', verbose_name=b'Image')),\n ('created_at', models.DateTimeField(help_text=b'Date when category created.', verbose_name=b'Created At', auto_now_add=True)),\n ('updated_at', models.DateTimeField(help_text=b'Date when category updated.', verbose_name=b'Updated At', auto_now=True)),\n ('category', models.ForeignKey(related_name='images', blank=True, to='venue.Category', null=True)),\n ],\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from nltk.stem import SnowballStemmer import pandas as pd from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import CountVectorizer import re import numpy as np trainname = 'train_data.csv' testname = 'testval_data.csv' wnl = WordNetLemmatizer() stemmer = SnowballStemmer("english") # stemmer = PorterStemmer() stoplist = set(stopwords.words("english")) # remove all the punctuation, whitespace and stop words, convert all the disparities of a word into their normalized form. def process_review(review): review = re.sub(r'[^a-zA-Z]', ' ', review) review = review.lower() # texts = [stemmer.stem(word) for word in review.lower().split() if word not in stoplist] texts = [wnl.lemmatize(word) for word in review.lower().split() if word not in stoplist] # texts = [word for word in review.lower().split() if word not in stoplist] return texts # Our list of functions to apply. transform_functions = [ lambda x: len(x), lambda x: x.count(" "), lambda x: x.count("."), lambda x: x.count("!"), lambda x: x.count("?"), lambda x: len(x) / (x.count(" ") + 1), lambda x: x.count(" ") / (x.count(".") + 1), lambda x: len(re.findall("\d", x)), lambda x: len(re.findall("[A-Z]", x)), ] # Apply each function and put the results into a list. columns = [] for func in transform_functions: columns.append(reviews["text"].apply(func)) # Convert the meta features to a numpy array. 
meta = np.asarray(columns).T # TfidfVectorizer tfv = TfidfVectorizer(analyzer='word',min_df=3,ngram_range=(1, 2), smooth_idf=1,stop_words=None, strip_accents=None, sublinear_tf=1,token_pattern=r'\w{1,}', use_idf=1).fit(x1) # CountVectorizer train = pd.read_csv(trainname) test = pd.read_csv(testname) x1 = train.loc[:, 'text'] x2 = test.loc[:, 'text'] cvt = CountVectorizer(analyzer=process_review).fit(x1) tx1 = cvt.transform(x1) tx2 = cvt.transform(x2) # np.savetxt("x.txt", tx.toarray(), delimiter=",") y1 = train.loc[:, 'stars']
normal
{ "blob_id": "658532e1b81b025b8295bbf468dc01ecf12b922a", "index": 6463, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef process_review(review):\n review = re.sub('[^a-zA-Z]', ' ', review)\n review = review.lower()\n texts = [wnl.lemmatize(word) for word in review.lower().split() if word\n not in stoplist]\n return texts\n\n\n<mask token>\nfor func in transform_functions:\n columns.append(reviews['text'].apply(func))\n<mask token>\n", "step-3": "<mask token>\ntrainname = 'train_data.csv'\ntestname = 'testval_data.csv'\nwnl = WordNetLemmatizer()\nstemmer = SnowballStemmer('english')\nstoplist = set(stopwords.words('english'))\n\n\ndef process_review(review):\n review = re.sub('[^a-zA-Z]', ' ', review)\n review = review.lower()\n texts = [wnl.lemmatize(word) for word in review.lower().split() if word\n not in stoplist]\n return texts\n\n\ntransform_functions = [lambda x: len(x), lambda x: x.count(' '), lambda x:\n x.count('.'), lambda x: x.count('!'), lambda x: x.count('?'), lambda x:\n len(x) / (x.count(' ') + 1), lambda x: x.count(' ') / (x.count('.') + 1\n ), lambda x: len(re.findall('\\\\d', x)), lambda x: len(re.findall(\n '[A-Z]', x))]\ncolumns = []\nfor func in transform_functions:\n columns.append(reviews['text'].apply(func))\nmeta = np.asarray(columns).T\ntfv = TfidfVectorizer(analyzer='word', min_df=3, ngram_range=(1, 2),\n smooth_idf=1, stop_words=None, strip_accents=None, sublinear_tf=1,\n token_pattern='\\\\w{1,}', use_idf=1).fit(x1)\ntrain = pd.read_csv(trainname)\ntest = pd.read_csv(testname)\nx1 = train.loc[:, 'text']\nx2 = test.loc[:, 'text']\ncvt = CountVectorizer(analyzer=process_review).fit(x1)\ntx1 = cvt.transform(x1)\ntx2 = cvt.transform(x2)\ny1 = train.loc[:, 'stars']\n", "step-4": "from nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem import SnowballStemmer\nimport pandas as pd\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom 
sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport re\nimport numpy as np\ntrainname = 'train_data.csv'\ntestname = 'testval_data.csv'\nwnl = WordNetLemmatizer()\nstemmer = SnowballStemmer('english')\nstoplist = set(stopwords.words('english'))\n\n\ndef process_review(review):\n review = re.sub('[^a-zA-Z]', ' ', review)\n review = review.lower()\n texts = [wnl.lemmatize(word) for word in review.lower().split() if word\n not in stoplist]\n return texts\n\n\ntransform_functions = [lambda x: len(x), lambda x: x.count(' '), lambda x:\n x.count('.'), lambda x: x.count('!'), lambda x: x.count('?'), lambda x:\n len(x) / (x.count(' ') + 1), lambda x: x.count(' ') / (x.count('.') + 1\n ), lambda x: len(re.findall('\\\\d', x)), lambda x: len(re.findall(\n '[A-Z]', x))]\ncolumns = []\nfor func in transform_functions:\n columns.append(reviews['text'].apply(func))\nmeta = np.asarray(columns).T\ntfv = TfidfVectorizer(analyzer='word', min_df=3, ngram_range=(1, 2),\n smooth_idf=1, stop_words=None, strip_accents=None, sublinear_tf=1,\n token_pattern='\\\\w{1,}', use_idf=1).fit(x1)\ntrain = pd.read_csv(trainname)\ntest = pd.read_csv(testname)\nx1 = train.loc[:, 'text']\nx2 = test.loc[:, 'text']\ncvt = CountVectorizer(analyzer=process_review).fit(x1)\ntx1 = cvt.transform(x1)\ntx2 = cvt.transform(x2)\ny1 = train.loc[:, 'stars']\n", "step-5": "from nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem import SnowballStemmer\nimport pandas as pd\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport re\nimport numpy as np\ntrainname = 'train_data.csv'\ntestname = 'testval_data.csv'\nwnl = WordNetLemmatizer()\nstemmer = SnowballStemmer(\"english\")\n# stemmer = PorterStemmer()\nstoplist = 
set(stopwords.words(\"english\"))\n\n# remove all the punctuation, whitespace and stop words, convert all the disparities of a word into their normalized form.\ndef process_review(review):\n review = re.sub(r'[^a-zA-Z]', ' ', review)\n review = review.lower()\n # texts = [stemmer.stem(word) for word in review.lower().split() if word not in stoplist]\n texts = [wnl.lemmatize(word) for word in review.lower().split() if word not in stoplist]\n # texts = [word for word in review.lower().split() if word not in stoplist]\n return texts\n\n# Our list of functions to apply.\ntransform_functions = [\n lambda x: len(x),\n lambda x: x.count(\" \"),\n lambda x: x.count(\".\"),\n lambda x: x.count(\"!\"),\n lambda x: x.count(\"?\"),\n lambda x: len(x) / (x.count(\" \") + 1),\n lambda x: x.count(\" \") / (x.count(\".\") + 1),\n lambda x: len(re.findall(\"\\d\", x)),\n lambda x: len(re.findall(\"[A-Z]\", x)),\n]\n\n# Apply each function and put the results into a list.\ncolumns = []\nfor func in transform_functions:\n columns.append(reviews[\"text\"].apply(func))\n\n# Convert the meta features to a numpy array.\nmeta = np.asarray(columns).T\n\n\n# TfidfVectorizer\ntfv = TfidfVectorizer(analyzer='word',min_df=3,ngram_range=(1, 2), smooth_idf=1,stop_words=None, strip_accents=None, sublinear_tf=1,token_pattern=r'\\w{1,}', use_idf=1).fit(x1)\n\n\n# CountVectorizer\ntrain = pd.read_csv(trainname)\ntest = pd.read_csv(testname)\nx1 = train.loc[:, 'text']\nx2 = test.loc[:, 'text']\ncvt = CountVectorizer(analyzer=process_review).fit(x1)\ntx1 = cvt.transform(x1)\ntx2 = cvt.transform(x2)\n# np.savetxt(\"x.txt\", tx.toarray(), delimiter=\",\")\ny1 = train.loc[:, 'stars']\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
""" You are given pre-order traversal with a slight modification. It includes null pointers when a particular node has nil left/right child. Reconstruct the binary tree with this information. Ex. [H, B, F, None, None, E, A, None, None, None, C, None, D, None, G, I, None, None, None] H / \ B C / \ \ F E D / \ A G / I """ # time: O(n) def contruct_tree(pre_order, index=0): index += 1 if index >= len(pre_order): raise IndexError('wtf is wrong with you?') root = pre_order[index] if root is None: return (None, index) node = BST(root) node.left, index = construct(pre_order, index) node.right, index = construct(pre_order, index) return (node, index) # my solution without recursion # works? def contruct_tree(pre_order): tree = BST(pre_order[0]) curr = tree stack = [] i = 0 while i < len(pre_order)-1: if curr is not None: curr.left = L[i+1] stack.append(curr) cur = curr.left else: curr = stack.pop() curr.right = L[i+1] cur = curr.right return tree
normal
{ "blob_id": "3aee336956ac6f962c34f51a27dc4abebf2cc7c8", "index": 8474, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef contruct_tree(pre_order, index=0):\n index += 1\n if index >= len(pre_order):\n raise IndexError('wtf is wrong with you?')\n root = pre_order[index]\n if root is None:\n return None, index\n node = BST(root)\n node.left, index = construct(pre_order, index)\n node.right, index = construct(pre_order, index)\n return node, index\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef contruct_tree(pre_order, index=0):\n index += 1\n if index >= len(pre_order):\n raise IndexError('wtf is wrong with you?')\n root = pre_order[index]\n if root is None:\n return None, index\n node = BST(root)\n node.left, index = construct(pre_order, index)\n node.right, index = construct(pre_order, index)\n return node, index\n\n\ndef contruct_tree(pre_order):\n tree = BST(pre_order[0])\n curr = tree\n stack = []\n i = 0\n while i < len(pre_order) - 1:\n if curr is not None:\n curr.left = L[i + 1]\n stack.append(curr)\n cur = curr.left\n else:\n curr = stack.pop()\n curr.right = L[i + 1]\n cur = curr.right\n return tree\n", "step-4": "\"\"\"\nYou are given pre-order traversal with a slight modification. \nIt includes null pointers when a particular node has nil left/right child. \nReconstruct the binary tree with this information.\n\nEx. 
[H, B, F, None, None, E, A, None, None, None, C, None, D, None, G, I, None, None, None]\n\n H\n / \\\n B C\n / \\ \\\nF E D\n / \\\n A G\n /\n I\n\"\"\"\n\n# time: O(n)\ndef contruct_tree(pre_order, index=0):\n index += 1\n if index >= len(pre_order):\n raise IndexError('wtf is wrong with you?')\n\n root = pre_order[index]\n if root is None:\n return (None, index)\n\n\n node = BST(root)\n node.left, index = construct(pre_order, index)\n node.right, index = construct(pre_order, index)\n\n return (node, index)\n\n\n# my solution without recursion\n# works?\n\ndef contruct_tree(pre_order):\n tree = BST(pre_order[0])\n curr = tree\n stack = []\n i = 0\n while i < len(pre_order)-1:\n if curr is not None:\n curr.left = L[i+1]\n stack.append(curr)\n cur = curr.left\n else:\n curr = stack.pop()\n curr.right = L[i+1]\n cur = curr.right\n\n return tree\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Your code here d = dict() count = 0 fave_fast_food = input("Fave fast food restaurant: ") for i in range(1, 11): if fave_fast_food in d: d[fave_fast_food] += 1 else: d[fave_fast_food] = 1 count+= 1 fave_fast_food = input("Fave fast food restaurant: ") for k,v in d.items(): print('Fast Food Resturants that are ' + k + ": " + str(v)) maximum = max(d, key=d.get) # Just use 'min' instead of 'max' for minimum. print("The fast food restaurant " + maximum + " has this many votes:", d[maximum])
normal
{ "blob_id": "a494b3469682a909b76e67e1b78ad25affe99f24", "index": 8688, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(1, 11):\n if fave_fast_food in d:\n d[fave_fast_food] += 1\n else:\n d[fave_fast_food] = 1\n count += 1\n fave_fast_food = input('Fave fast food restaurant: ')\nfor k, v in d.items():\n print('Fast Food Resturants that are ' + k + ': ' + str(v))\n<mask token>\nprint('The fast food restaurant ' + maximum + ' has this many votes:', d[\n maximum])\n", "step-3": "d = dict()\ncount = 0\nfave_fast_food = input('Fave fast food restaurant: ')\nfor i in range(1, 11):\n if fave_fast_food in d:\n d[fave_fast_food] += 1\n else:\n d[fave_fast_food] = 1\n count += 1\n fave_fast_food = input('Fave fast food restaurant: ')\nfor k, v in d.items():\n print('Fast Food Resturants that are ' + k + ': ' + str(v))\nmaximum = max(d, key=d.get)\nprint('The fast food restaurant ' + maximum + ' has this many votes:', d[\n maximum])\n", "step-4": "# Your code here\nd = dict()\ncount = 0\nfave_fast_food = input(\"Fave fast food restaurant: \")\n\nfor i in range(1, 11):\n if fave_fast_food in d:\n d[fave_fast_food] += 1\n else:\n d[fave_fast_food] = 1\n count+= 1\n fave_fast_food = input(\"Fave fast food restaurant: \")\n\nfor k,v in d.items():\n print('Fast Food Resturants that are ' + k + \": \" + str(v))\n\nmaximum = max(d, key=d.get) # Just use 'min' instead of 'max' for minimum.\nprint(\"The fast food restaurant \" + maximum + \" has this many votes:\", d[maximum])", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from zipfile import ZipFile import reference_new_stdds import reader import os def runall(path): print("==========================") """get the current path """ abs_file_path = os.path.abspath(__file__) parent_dir = os.path.dirname(abs_file_path) parent_dir = os.path.dirname(parent_dir) """ path that stores xml files""" xml_path = parent_dir.replace("\\", "/") + "/Examples/xmls/"+path # print(xml_path) """ call RIE module""" ref_list = reference_new_stdds.get_contri_info(xml_path) reference_new_stdds.write_excel(ref_list) """ call reader module""" reader.write_csv(path) # Create a zip file with ZipFile(parent_dir.replace("\\", "/")+'/Output/xmlOutput/XMLOutput.zip', 'w') as zipObj: # # Add multiple files to the zip zipObj.write(parent_dir.replace("\\", "/")+'/Output/xmlOutput/TNC_Taxonomic_name_usage_XmlOutput.csv', "TNC_Taxonomic_name_usage_XmlOutput.csv") zipObj.write(parent_dir.replace("\\", "/")+'/Output/xmlOutput/TNC_Typification_XmlOutput.csv', "TNC_Typification_XmlOutput.csv") zipObj.write(parent_dir.replace("\\", "/")+'/Output/xmlOutput/{}_XmlOutput.csv'.format(path.replace(".xml","")), "{}_XmlOutput.csv".format(path.replace(".xml",""))) zipObj.write(parent_dir.replace("\\", "/") + '/Output/xmlOutput/BibliographicResource.csv', "BibliographicResource.csv") runall("A_new_genus_and_two_new_species_of_miniature_clingfishes.xml")
normal
{ "blob_id": "1158ab95ac67d62459284267a8cc9f587daf89b1", "index": 9329, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef runall(path):\n print('==========================')\n \"\"\"get the current path \"\"\"\n abs_file_path = os.path.abspath(__file__)\n parent_dir = os.path.dirname(abs_file_path)\n parent_dir = os.path.dirname(parent_dir)\n \"\"\" path that stores xml files\"\"\"\n xml_path = parent_dir.replace('\\\\', '/') + '/Examples/xmls/' + path\n \"\"\" call RIE module\"\"\"\n ref_list = reference_new_stdds.get_contri_info(xml_path)\n reference_new_stdds.write_excel(ref_list)\n \"\"\" call reader module\"\"\"\n reader.write_csv(path)\n with ZipFile(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/XMLOutput.zip', 'w') as zipObj:\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/TNC_Taxonomic_name_usage_XmlOutput.csv',\n 'TNC_Taxonomic_name_usage_XmlOutput.csv')\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/TNC_Typification_XmlOutput.csv',\n 'TNC_Typification_XmlOutput.csv')\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/{}_XmlOutput.csv'.format(path.replace('.xml',\n '')), '{}_XmlOutput.csv'.format(path.replace('.xml', '')))\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/BibliographicResource.csv',\n 'BibliographicResource.csv')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef runall(path):\n print('==========================')\n \"\"\"get the current path \"\"\"\n abs_file_path = os.path.abspath(__file__)\n parent_dir = os.path.dirname(abs_file_path)\n parent_dir = os.path.dirname(parent_dir)\n \"\"\" path that stores xml files\"\"\"\n xml_path = parent_dir.replace('\\\\', '/') + '/Examples/xmls/' + path\n \"\"\" call RIE module\"\"\"\n ref_list = reference_new_stdds.get_contri_info(xml_path)\n reference_new_stdds.write_excel(ref_list)\n \"\"\" call reader module\"\"\"\n reader.write_csv(path)\n with ZipFile(parent_dir.replace('\\\\', '/') +\n 
'/Output/xmlOutput/XMLOutput.zip', 'w') as zipObj:\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/TNC_Taxonomic_name_usage_XmlOutput.csv',\n 'TNC_Taxonomic_name_usage_XmlOutput.csv')\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/TNC_Typification_XmlOutput.csv',\n 'TNC_Typification_XmlOutput.csv')\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/{}_XmlOutput.csv'.format(path.replace('.xml',\n '')), '{}_XmlOutput.csv'.format(path.replace('.xml', '')))\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/BibliographicResource.csv',\n 'BibliographicResource.csv')\n\n\nrunall('A_new_genus_and_two_new_species_of_miniature_clingfishes.xml')\n", "step-4": "from zipfile import ZipFile\nimport reference_new_stdds\nimport reader\nimport os\n\n\ndef runall(path):\n print('==========================')\n \"\"\"get the current path \"\"\"\n abs_file_path = os.path.abspath(__file__)\n parent_dir = os.path.dirname(abs_file_path)\n parent_dir = os.path.dirname(parent_dir)\n \"\"\" path that stores xml files\"\"\"\n xml_path = parent_dir.replace('\\\\', '/') + '/Examples/xmls/' + path\n \"\"\" call RIE module\"\"\"\n ref_list = reference_new_stdds.get_contri_info(xml_path)\n reference_new_stdds.write_excel(ref_list)\n \"\"\" call reader module\"\"\"\n reader.write_csv(path)\n with ZipFile(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/XMLOutput.zip', 'w') as zipObj:\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/TNC_Taxonomic_name_usage_XmlOutput.csv',\n 'TNC_Taxonomic_name_usage_XmlOutput.csv')\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/TNC_Typification_XmlOutput.csv',\n 'TNC_Typification_XmlOutput.csv')\n zipObj.write(parent_dir.replace('\\\\', '/') +\n '/Output/xmlOutput/{}_XmlOutput.csv'.format(path.replace('.xml',\n '')), '{}_XmlOutput.csv'.format(path.replace('.xml', '')))\n zipObj.write(parent_dir.replace('\\\\', '/') +\n 
'/Output/xmlOutput/BibliographicResource.csv',\n 'BibliographicResource.csv')\n\n\nrunall('A_new_genus_and_two_new_species_of_miniature_clingfishes.xml')\n", "step-5": "from zipfile import ZipFile\n\nimport reference_new_stdds\n\nimport reader\nimport os\n\ndef runall(path):\n print(\"==========================\")\n \"\"\"get the current path \"\"\"\n abs_file_path = os.path.abspath(__file__)\n parent_dir = os.path.dirname(abs_file_path)\n parent_dir = os.path.dirname(parent_dir)\n\n \"\"\" path that stores xml files\"\"\"\n xml_path = parent_dir.replace(\"\\\\\", \"/\") + \"/Examples/xmls/\"+path\n # print(xml_path)\n\n \"\"\" call RIE module\"\"\"\n ref_list = reference_new_stdds.get_contri_info(xml_path)\n reference_new_stdds.write_excel(ref_list)\n \"\"\" call reader module\"\"\"\n reader.write_csv(path)\n\n # Create a zip file\n with ZipFile(parent_dir.replace(\"\\\\\", \"/\")+'/Output/xmlOutput/XMLOutput.zip', 'w') as zipObj:\n\n # # Add multiple files to the zip\n zipObj.write(parent_dir.replace(\"\\\\\", \"/\")+'/Output/xmlOutput/TNC_Taxonomic_name_usage_XmlOutput.csv', \"TNC_Taxonomic_name_usage_XmlOutput.csv\")\n zipObj.write(parent_dir.replace(\"\\\\\", \"/\")+'/Output/xmlOutput/TNC_Typification_XmlOutput.csv', \"TNC_Typification_XmlOutput.csv\")\n zipObj.write(parent_dir.replace(\"\\\\\", \"/\")+'/Output/xmlOutput/{}_XmlOutput.csv'.format(path.replace(\".xml\",\"\")), \"{}_XmlOutput.csv\".format(path.replace(\".xml\",\"\")))\n zipObj.write(parent_dir.replace(\"\\\\\", \"/\") + '/Output/xmlOutput/BibliographicResource.csv', \"BibliographicResource.csv\")\n\n\n\nrunall(\"A_new_genus_and_two_new_species_of_miniature_clingfishes.xml\")\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django.urls import path from . import views # url configuration for view.index function app_name = 'movies' urlpatterns = [ path('', views.index, name='index'), # represents a root of this app path('<int:movie_id>', views.detail, name='detail') ]
normal
{ "blob_id": "5aaac757b766b0143ca3ea54d8fc4b8936160ec7", "index": 5090, "step-1": "<mask token>\n", "step-2": "<mask token>\napp_name = 'movies'\nurlpatterns = [path('', views.index, name='index'), path('<int:movie_id>',\n views.detail, name='detail')]\n", "step-3": "from django.urls import path\nfrom . import views\napp_name = 'movies'\nurlpatterns = [path('', views.index, name='index'), path('<int:movie_id>',\n views.detail, name='detail')]\n", "step-4": "from django.urls import path\nfrom . import views\n\n# url configuration for view.index function\napp_name = 'movies'\nurlpatterns = [\n path('', views.index, name='index'), # represents a root of this app\n path('<int:movie_id>', views.detail, name='detail')\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from http import HTTPStatus from ninja import Router mock_post_router = Router() @mock_post_router.get( "/mock_posts", url_name="mock_post_list", summary="전체 mock post의 list를 반환한다", response={200: None}, ) def retrieve_all_mock_posts(request): return HTTPStatus.OK
normal
{ "blob_id": "dcb57ecf2c72b8ac816bb06986d80544ff97c669", "index": 5915, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@mock_post_router.get('/mock_posts', url_name='mock_post_list', summary=\n '전체 mock post의 list를 반환한다', response={(200): None})\ndef retrieve_all_mock_posts(request):\n return HTTPStatus.OK\n", "step-3": "<mask token>\nmock_post_router = Router()\n\n\n@mock_post_router.get('/mock_posts', url_name='mock_post_list', summary=\n '전체 mock post의 list를 반환한다', response={(200): None})\ndef retrieve_all_mock_posts(request):\n return HTTPStatus.OK\n", "step-4": "from http import HTTPStatus\nfrom ninja import Router\nmock_post_router = Router()\n\n\n@mock_post_router.get('/mock_posts', url_name='mock_post_list', summary=\n '전체 mock post의 list를 반환한다', response={(200): None})\ndef retrieve_all_mock_posts(request):\n return HTTPStatus.OK\n", "step-5": "from http import HTTPStatus\n\nfrom ninja import Router\n\nmock_post_router = Router()\n\n\n@mock_post_router.get(\n \"/mock_posts\",\n url_name=\"mock_post_list\",\n summary=\"전체 mock post의 list를 반환한다\",\n response={200: None},\n)\ndef retrieve_all_mock_posts(request):\n return HTTPStatus.OK\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
n=int(input("val : ")) def fact(n): c=1; for i in range(1,n+1): c*=i; return c; print(fact(n));
normal
{ "blob_id": "1f4d9f5406b91fd687c0ace8ed29e3c4dfb4d3d2", "index": 8748, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef fact(n):\n c = 1\n for i in range(1, n + 1):\n c *= i\n return c\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef fact(n):\n c = 1\n for i in range(1, n + 1):\n c *= i\n return c\n\n\nprint(fact(n))\n", "step-4": "n = int(input('val : '))\n\n\ndef fact(n):\n c = 1\n for i in range(1, n + 1):\n c *= i\n return c\n\n\nprint(fact(n))\n", "step-5": "n=int(input(\"val : \"))\r\n\r\n\r\ndef fact(n):\r\n c=1;\r\n for i in range(1,n+1):\r\n c*=i;\r\n return c;\r\n\r\nprint(fact(n));", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Print name and marks f = open("marks.txt", "rt") for line in f: line = line.strip() if len(line) == 0: # Blank line continue name, *marks = line.split(",") if len(marks) == 0: continue marks = filter(str.isdigit, marks) # Take only numbers total = sum(map(int, marks)) # Convert str to it and sum it print(f"{name:15} {total:4}") f.close()
normal
{ "blob_id": "00587de133ee68415f31649f147fbff7e9bf65d5", "index": 3337, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n name, *marks = line.split(',')\n if len(marks) == 0:\n continue\n marks = filter(str.isdigit, marks)\n total = sum(map(int, marks))\n print(f'{name:15} {total:4}')\nf.close()\n", "step-3": "f = open('marks.txt', 'rt')\nfor line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n name, *marks = line.split(',')\n if len(marks) == 0:\n continue\n marks = filter(str.isdigit, marks)\n total = sum(map(int, marks))\n print(f'{name:15} {total:4}')\nf.close()\n", "step-4": "# Print name and marks\nf = open(\"marks.txt\", \"rt\")\nfor line in f:\n line = line.strip()\n if len(line) == 0: # Blank line\n continue\n\n name, *marks = line.split(\",\")\n if len(marks) == 0:\n continue\n\n marks = filter(str.isdigit, marks) # Take only numbers\n total = sum(map(int, marks)) # Convert str to it and sum it\n print(f\"{name:15} {total:4}\")\n\nf.close()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import packaging.requirements import pydantic import pytest from prefect.software.pip import PipRequirement, current_environment_requirements class TestPipRequirement: def is_packaging_subclass(self): r = PipRequirement("prefect") assert isinstance(r, packaging.requirements.Requirement) def test_can_be_used_in_pydantic_model(self): class MyModel(pydantic.BaseModel): req: PipRequirement inst = MyModel(req="prefect") assert inst.req == PipRequirement("prefect") assert isinstance(inst.req, PipRequirement) def test_equality(self): assert PipRequirement("prefect") == PipRequirement("prefect") assert PipRequirement("prefect") != PipRequirement("prefect>=2") # TODO: Add tests that mock the working set so we can make meaningful assertions def test_current_environment_requirements(): requirements = current_environment_requirements( on_uninstallable_requirement="ignore" ) assert all(isinstance(r, PipRequirement) for r in requirements) names = [r.name for r in requirements] assert "prefect" not in names # Editable install is excluded assert len(names) == len(set(names)), "Names should not be repeated" def test_current_environment_requirements_warns_about_editable_prefect(): with pytest.warns( UserWarning, match=r"prefect.*is an editable installation", ): requirements = current_environment_requirements( on_uninstallable_requirement="warn" ) assert all(isinstance(r, PipRequirement) for r in requirements) names = [r.name for r in requirements] assert "prefect" not in names assert len(names) == len(set(names)), "Names should not be repeated" def test_current_environment_requirements_raises_on_editable_prefect(): with pytest.raises( ValueError, match=r"prefect.*is an editable installation", ): current_environment_requirements(on_uninstallable_requirement="raise") def test_current_environment_requirements_raises_on_bad_mode(): with pytest.raises( ValueError, match=r"Unknown mode for `on_uninstallable_requirement`", ): 
current_environment_requirements(on_uninstallable_requirement="foo") def test_current_environment_requirements_top_level_only(): requirements = current_environment_requirements( exclude_nested=True, on_uninstallable_requirement="ignore" ) all_requirements = current_environment_requirements( on_uninstallable_requirement="ignore" ) assert {r.name for r in requirements}.issubset({r.name for r in all_requirements}) assert len(requirements) < len(all_requirements) assert all(isinstance(r, PipRequirement) for r in requirements)
normal
{ "blob_id": "64366e8532ffe05db7e7b7313e1d573c78a4e030", "index": 796, "step-1": "<mask token>\n\n\nclass TestPipRequirement:\n\n def is_packaging_subclass(self):\n r = PipRequirement('prefect')\n assert isinstance(r, packaging.requirements.Requirement)\n\n def test_can_be_used_in_pydantic_model(self):\n\n\n class MyModel(pydantic.BaseModel):\n req: PipRequirement\n inst = MyModel(req='prefect')\n assert inst.req == PipRequirement('prefect')\n assert isinstance(inst.req, PipRequirement)\n\n def test_equality(self):\n assert PipRequirement('prefect') == PipRequirement('prefect')\n assert PipRequirement('prefect') != PipRequirement('prefect>=2')\n\n\n<mask token>\n\n\ndef test_current_environment_requirements_warns_about_editable_prefect():\n with pytest.warns(UserWarning, match='prefect.*is an editable installation'\n ):\n requirements = current_environment_requirements(\n on_uninstallable_requirement='warn')\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert 'prefect' not in names\n assert len(names) == len(set(names)), 'Names should not be repeated'\n\n\n<mask token>\n\n\ndef test_current_environment_requirements_top_level_only():\n requirements = current_environment_requirements(exclude_nested=True,\n on_uninstallable_requirement='ignore')\n all_requirements = current_environment_requirements(\n on_uninstallable_requirement='ignore')\n assert {r.name for r in requirements}.issubset({r.name for r in\n all_requirements})\n assert len(requirements) < len(all_requirements)\n assert all(isinstance(r, PipRequirement) for r in requirements)\n", "step-2": "<mask token>\n\n\nclass TestPipRequirement:\n\n def is_packaging_subclass(self):\n r = PipRequirement('prefect')\n assert isinstance(r, packaging.requirements.Requirement)\n\n def test_can_be_used_in_pydantic_model(self):\n\n\n class MyModel(pydantic.BaseModel):\n req: PipRequirement\n inst = MyModel(req='prefect')\n assert inst.req == 
PipRequirement('prefect')\n assert isinstance(inst.req, PipRequirement)\n\n def test_equality(self):\n assert PipRequirement('prefect') == PipRequirement('prefect')\n assert PipRequirement('prefect') != PipRequirement('prefect>=2')\n\n\n<mask token>\n\n\ndef test_current_environment_requirements_warns_about_editable_prefect():\n with pytest.warns(UserWarning, match='prefect.*is an editable installation'\n ):\n requirements = current_environment_requirements(\n on_uninstallable_requirement='warn')\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert 'prefect' not in names\n assert len(names) == len(set(names)), 'Names should not be repeated'\n\n\ndef test_current_environment_requirements_raises_on_editable_prefect():\n with pytest.raises(ValueError, match='prefect.*is an editable installation'\n ):\n current_environment_requirements(on_uninstallable_requirement='raise')\n\n\n<mask token>\n\n\ndef test_current_environment_requirements_top_level_only():\n requirements = current_environment_requirements(exclude_nested=True,\n on_uninstallable_requirement='ignore')\n all_requirements = current_environment_requirements(\n on_uninstallable_requirement='ignore')\n assert {r.name for r in requirements}.issubset({r.name for r in\n all_requirements})\n assert len(requirements) < len(all_requirements)\n assert all(isinstance(r, PipRequirement) for r in requirements)\n", "step-3": "<mask token>\n\n\nclass TestPipRequirement:\n\n def is_packaging_subclass(self):\n r = PipRequirement('prefect')\n assert isinstance(r, packaging.requirements.Requirement)\n\n def test_can_be_used_in_pydantic_model(self):\n\n\n class MyModel(pydantic.BaseModel):\n req: PipRequirement\n inst = MyModel(req='prefect')\n assert inst.req == PipRequirement('prefect')\n assert isinstance(inst.req, PipRequirement)\n\n def test_equality(self):\n assert PipRequirement('prefect') == PipRequirement('prefect')\n assert PipRequirement('prefect') != 
PipRequirement('prefect>=2')\n\n\ndef test_current_environment_requirements():\n requirements = current_environment_requirements(\n on_uninstallable_requirement='ignore')\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert 'prefect' not in names\n assert len(names) == len(set(names)), 'Names should not be repeated'\n\n\ndef test_current_environment_requirements_warns_about_editable_prefect():\n with pytest.warns(UserWarning, match='prefect.*is an editable installation'\n ):\n requirements = current_environment_requirements(\n on_uninstallable_requirement='warn')\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert 'prefect' not in names\n assert len(names) == len(set(names)), 'Names should not be repeated'\n\n\ndef test_current_environment_requirements_raises_on_editable_prefect():\n with pytest.raises(ValueError, match='prefect.*is an editable installation'\n ):\n current_environment_requirements(on_uninstallable_requirement='raise')\n\n\n<mask token>\n\n\ndef test_current_environment_requirements_top_level_only():\n requirements = current_environment_requirements(exclude_nested=True,\n on_uninstallable_requirement='ignore')\n all_requirements = current_environment_requirements(\n on_uninstallable_requirement='ignore')\n assert {r.name for r in requirements}.issubset({r.name for r in\n all_requirements})\n assert len(requirements) < len(all_requirements)\n assert all(isinstance(r, PipRequirement) for r in requirements)\n", "step-4": "import packaging.requirements\nimport pydantic\nimport pytest\nfrom prefect.software.pip import PipRequirement, current_environment_requirements\n\n\nclass TestPipRequirement:\n\n def is_packaging_subclass(self):\n r = PipRequirement('prefect')\n assert isinstance(r, packaging.requirements.Requirement)\n\n def test_can_be_used_in_pydantic_model(self):\n\n\n class MyModel(pydantic.BaseModel):\n req: 
PipRequirement\n inst = MyModel(req='prefect')\n assert inst.req == PipRequirement('prefect')\n assert isinstance(inst.req, PipRequirement)\n\n def test_equality(self):\n assert PipRequirement('prefect') == PipRequirement('prefect')\n assert PipRequirement('prefect') != PipRequirement('prefect>=2')\n\n\ndef test_current_environment_requirements():\n requirements = current_environment_requirements(\n on_uninstallable_requirement='ignore')\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert 'prefect' not in names\n assert len(names) == len(set(names)), 'Names should not be repeated'\n\n\ndef test_current_environment_requirements_warns_about_editable_prefect():\n with pytest.warns(UserWarning, match='prefect.*is an editable installation'\n ):\n requirements = current_environment_requirements(\n on_uninstallable_requirement='warn')\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert 'prefect' not in names\n assert len(names) == len(set(names)), 'Names should not be repeated'\n\n\ndef test_current_environment_requirements_raises_on_editable_prefect():\n with pytest.raises(ValueError, match='prefect.*is an editable installation'\n ):\n current_environment_requirements(on_uninstallable_requirement='raise')\n\n\ndef test_current_environment_requirements_raises_on_bad_mode():\n with pytest.raises(ValueError, match=\n 'Unknown mode for `on_uninstallable_requirement`'):\n current_environment_requirements(on_uninstallable_requirement='foo')\n\n\ndef test_current_environment_requirements_top_level_only():\n requirements = current_environment_requirements(exclude_nested=True,\n on_uninstallable_requirement='ignore')\n all_requirements = current_environment_requirements(\n on_uninstallable_requirement='ignore')\n assert {r.name for r in requirements}.issubset({r.name for r in\n all_requirements})\n assert len(requirements) < len(all_requirements)\n 
assert all(isinstance(r, PipRequirement) for r in requirements)\n", "step-5": "import packaging.requirements\nimport pydantic\nimport pytest\n\nfrom prefect.software.pip import PipRequirement, current_environment_requirements\n\n\nclass TestPipRequirement:\n def is_packaging_subclass(self):\n r = PipRequirement(\"prefect\")\n assert isinstance(r, packaging.requirements.Requirement)\n\n def test_can_be_used_in_pydantic_model(self):\n class MyModel(pydantic.BaseModel):\n req: PipRequirement\n\n inst = MyModel(req=\"prefect\")\n assert inst.req == PipRequirement(\"prefect\")\n assert isinstance(inst.req, PipRequirement)\n\n def test_equality(self):\n assert PipRequirement(\"prefect\") == PipRequirement(\"prefect\")\n assert PipRequirement(\"prefect\") != PipRequirement(\"prefect>=2\")\n\n\n# TODO: Add tests that mock the working set so we can make meaningful assertions\n\n\ndef test_current_environment_requirements():\n requirements = current_environment_requirements(\n on_uninstallable_requirement=\"ignore\"\n )\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert \"prefect\" not in names # Editable install is excluded\n assert len(names) == len(set(names)), \"Names should not be repeated\"\n\n\ndef test_current_environment_requirements_warns_about_editable_prefect():\n with pytest.warns(\n UserWarning,\n match=r\"prefect.*is an editable installation\",\n ):\n requirements = current_environment_requirements(\n on_uninstallable_requirement=\"warn\"\n )\n assert all(isinstance(r, PipRequirement) for r in requirements)\n names = [r.name for r in requirements]\n assert \"prefect\" not in names\n assert len(names) == len(set(names)), \"Names should not be repeated\"\n\n\ndef test_current_environment_requirements_raises_on_editable_prefect():\n with pytest.raises(\n ValueError,\n match=r\"prefect.*is an editable installation\",\n ):\n 
current_environment_requirements(on_uninstallable_requirement=\"raise\")\n\n\ndef test_current_environment_requirements_raises_on_bad_mode():\n with pytest.raises(\n ValueError,\n match=r\"Unknown mode for `on_uninstallable_requirement`\",\n ):\n current_environment_requirements(on_uninstallable_requirement=\"foo\")\n\n\ndef test_current_environment_requirements_top_level_only():\n requirements = current_environment_requirements(\n exclude_nested=True, on_uninstallable_requirement=\"ignore\"\n )\n all_requirements = current_environment_requirements(\n on_uninstallable_requirement=\"ignore\"\n )\n assert {r.name for r in requirements}.issubset({r.name for r in all_requirements})\n assert len(requirements) < len(all_requirements)\n assert all(isinstance(r, PipRequirement) for r in requirements)\n", "step-ids": [ 6, 7, 8, 10, 11 ] }
[ 6, 7, 8, 10, 11 ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sqlite3 # 连接到db文件 conn = sqlite3.connect('app.db') # 创建一个Cursor: cursor = conn.cursor() # 查询所有表名: cursor.execute("select name from sqlite_master where type = 'table' order by name") print("Tables name:", cursor.fetchall()) # 查询表user的结构: cursor.execute('PRAGMA table_info(user)') print("Table structure:", cursor.fetchall()) # 执行查询表user内的所有记录: cursor.execute('select * from user') print("Table record:", cursor.fetchall()) cursor.close() conn.close()
normal
{ "blob_id": "dd8f4b08b88d487b68e916e9f92c08c9c0bc39da", "index": 2681, "step-1": "<mask token>\n", "step-2": "<mask token>\ncursor.execute(\n \"select name from sqlite_master where type = 'table' order by name\")\nprint('Tables name:', cursor.fetchall())\ncursor.execute('PRAGMA table_info(user)')\nprint('Table structure:', cursor.fetchall())\ncursor.execute('select * from user')\nprint('Table record:', cursor.fetchall())\ncursor.close()\nconn.close()\n", "step-3": "<mask token>\nconn = sqlite3.connect('app.db')\ncursor = conn.cursor()\ncursor.execute(\n \"select name from sqlite_master where type = 'table' order by name\")\nprint('Tables name:', cursor.fetchall())\ncursor.execute('PRAGMA table_info(user)')\nprint('Table structure:', cursor.fetchall())\ncursor.execute('select * from user')\nprint('Table record:', cursor.fetchall())\ncursor.close()\nconn.close()\n", "step-4": "import sqlite3\nconn = sqlite3.connect('app.db')\ncursor = conn.cursor()\ncursor.execute(\n \"select name from sqlite_master where type = 'table' order by name\")\nprint('Tables name:', cursor.fetchall())\ncursor.execute('PRAGMA table_info(user)')\nprint('Table structure:', cursor.fetchall())\ncursor.execute('select * from user')\nprint('Table record:', cursor.fetchall())\ncursor.close()\nconn.close()\n", "step-5": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n \r\nimport sqlite3\r\n\r\n# 连接到db文件\r\nconn = sqlite3.connect('app.db')\r\n# 创建一个Cursor:\r\ncursor = conn.cursor()\r\n\r\n# 查询所有表名:\r\ncursor.execute(\"select name from sqlite_master where type = 'table' order by name\")\r\nprint(\"Tables name:\", cursor.fetchall())\r\n\r\n# 查询表user的结构:\r\ncursor.execute('PRAGMA table_info(user)')\r\nprint(\"Table structure:\", cursor.fetchall())\r\n\r\n# 执行查询表user内的所有记录:\r\ncursor.execute('select * from user')\r\nprint(\"Table record:\", cursor.fetchall())\r\n\r\ncursor.close()\r\nconn.close()\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Calcu.py # import os, sys def menuCalc(): os.system('clear') print("Esto parece un menu:") print("\t1 - Suma") print("\t2 - Resta") print("\t3 - Multiplicacion") print("\t4 - Division") print("\tq - Para salir") def calculadora(calcu,): if calcu == "1": os.system('clear') s1=int(input("Ingrese un numero\n")) s2=int(input("Ingrese otro\n")) os.system('clear') print(f"{s1} + {s2} = {s1+s2}") input("\nPresione una tecla para continuar.") elif calcu == "2": os.system('clear') s1=int(input("Ingrese un numero\n")) s2=int(input("Ingrese otro\n")) os.system('clear') print(f"{s1} - {s2} = {s1-s2}") input("\nPresione una tecla para continuar.") elif calcu == "3": os.system('clear') s1=int(input("Ingrese un numero\n")) s2=int(input("Ingrese otro\n")) os.system('clear') print(f" {s1} x {s2} = {s1*s2}") input("\nPresione una tecla para continuar.") elif calcu == "4": os.system('clear') s1=int(input("Ingrese un numero\n")) s2=int(input("Ingrese otro\n")) os.system('clear') print(f"{s1} / {s2} = {s1 / s2}") input("\nPresione una tecla para continuar.") elif calcu == "q": print("Gracias, Vuelva Prontoss") exit() else: os.system('clear') print("Lo siento no es un numero valido!") while True: menuCalc() calc = input("Ingrese su opcion: ") calculadora(calc)
normal
{ "blob_id": "ac033e45ea61770c302be677f4dfc95945e2cca5", "index": 6100, "step-1": "<mask token>\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef menuCalc():\n os.system('clear')\n print('Esto parece un menu:')\n print('\\t1 - Suma')\n print('\\t2 - Resta')\n print('\\t3 - Multiplicacion')\n print('\\t4 - Division')\n print('\\tq - Para salir')\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = 
int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef menuCalc():\n os.system('clear')\n print('Esto parece un menu:')\n print('\\t1 - Suma')\n print('\\t2 - Resta')\n print('\\t3 - Multiplicacion')\n print('\\t4 - Division')\n print('\\tq - Para salir')\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\nwhile True:\n menuCalc()\n calc = input('Ingrese su opcion: ')\n calculadora(calc)\n", "step-4": "import os, sys\n\n\ndef menuCalc():\n os.system('clear')\n 
print('Esto parece un menu:')\n print('\\t1 - Suma')\n print('\\t2 - Resta')\n print('\\t3 - Multiplicacion')\n print('\\t4 - Division')\n print('\\tq - Para salir')\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\nwhile True:\n menuCalc()\n calc = input('Ingrese su opcion: ')\n calculadora(calc)\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Calcu.py\n# \n\nimport os, sys\n\ndef menuCalc():\n\n os.system('clear')\n print(\"Esto parece un menu:\")\n print(\"\\t1 - Suma\")\n print(\"\\t2 - Resta\")\n print(\"\\t3 - Multiplicacion\")\n print(\"\\t4 - Division\")\n print(\"\\tq - Para salir\")\n \ndef calculadora(calcu,):\n\tif calcu == \"1\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\"{s1} + {s2} = {s1+s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == 
\"2\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\"{s1} - {s2} = {s1-s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == \"3\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\" {s1} x {s2} = {s1*s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == \"4\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\"{s1} / {s2} = {s1 / s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == \"q\":\n\t\tprint(\"Gracias, Vuelva Prontoss\")\n\t\texit()\n\telse:\n\t\tos.system('clear')\n\t\tprint(\"Lo siento no es un numero valido!\")\n\nwhile True:\n \n menuCalc()\n calc = input(\"Ingrese su opcion: \")\n calculadora(calc)\n\t\n\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import matplotlib.pyplot as plt import pandas as pd from collections import Counter import numpy as np import imdb import csv import networkx as nx from networkx import * def split_data(data): df = pd.read_csv(data) ranks = df.groupby('userId')['timestamp'].rank(method='first') counts = df['userId'].map(df.groupby('userId')['timestamp'].apply(len)) # myes = (ranks / counts) > 0.8 df['new_col'] = (ranks / counts) > 0.8 # print(myes) print(df.head()) train = df.loc[df['new_col'] == False] test = df.loc[df['new_col'] == True] train = train.drop(['new_col'], axis=1) test = test.drop(['new_col'], axis=1) train.to_csv(r'C:\Users\Darkmaster\PycharmProjects\Recommender\Data\Cvorm\training.csv', header=False, index=False) test.to_csv(r'C:\Users\Darkmaster\PycharmProjects\Recommender\Data\Cvorm\testing.csv', header=False, index=False) # print(test.head()) # ----AND THEN SAVE THOSE AS CSV---- # for row in df.index # print(test_train) # print(ranks.head()) # print(counts.head()) # def make_train_or_test_txt(ratingdata): # df = pd.read_csv(ratingdata) # users = [] # [users.append(x) for x in df["userId"] if x not in users] # print(users) # with open('Data/KGAT/train.txt', 'w') as f: # # writer = csv.writer(f, delimiter='\t') # for x in users: # items = [] # items = df.query('userId == {}'.format(x))["movieId"] # items = items.values.tolist() # stringerbell = ''.join((str(e) + "\t") for e in items) # print(stringerbell) # # writer.writerow("{}{}".format(x, items)) # # writer.writerow(str(x) + stringerbell) # f.write(str(x) + "\t" + stringerbell + "\n") # # print(items) # # for j in range(len(df)): # # try: # # getitems = [x for x in df.loc[df["movieId"]]] # # except: # # continue # print(df.head()) # make_train_or_test_txt('Data/ratings.csv') split_data('C:\\Users\\Darkmaster\\PycharmProjects\\Recommender\\Data\\ratings.csv')
normal
{ "blob_id": "e3b39c6655fc14efec3b3f95b08bc7b2c036cbdc", "index": 4117, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef split_data(data):\n df = pd.read_csv(data)\n ranks = df.groupby('userId')['timestamp'].rank(method='first')\n counts = df['userId'].map(df.groupby('userId')['timestamp'].apply(len))\n df['new_col'] = ranks / counts > 0.8\n print(df.head())\n train = df.loc[df['new_col'] == False]\n test = df.loc[df['new_col'] == True]\n train = train.drop(['new_col'], axis=1)\n test = test.drop(['new_col'], axis=1)\n train.to_csv(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\Cvorm\\\\training.csv'\n , header=False, index=False)\n test.to_csv(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\Cvorm\\\\testing.csv'\n , header=False, index=False)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef split_data(data):\n df = pd.read_csv(data)\n ranks = df.groupby('userId')['timestamp'].rank(method='first')\n counts = df['userId'].map(df.groupby('userId')['timestamp'].apply(len))\n df['new_col'] = ranks / counts > 0.8\n print(df.head())\n train = df.loc[df['new_col'] == False]\n test = df.loc[df['new_col'] == True]\n train = train.drop(['new_col'], axis=1)\n test = test.drop(['new_col'], axis=1)\n train.to_csv(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\Cvorm\\\\training.csv'\n , header=False, index=False)\n test.to_csv(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\Cvorm\\\\testing.csv'\n , header=False, index=False)\n\n\nsplit_data(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\ratings.csv')\n", "step-4": "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom collections import Counter\nimport numpy as np\nimport imdb\nimport csv\nimport networkx as nx\nfrom networkx import *\n\n\ndef split_data(data):\n df = pd.read_csv(data)\n ranks = df.groupby('userId')['timestamp'].rank(method='first')\n counts = 
df['userId'].map(df.groupby('userId')['timestamp'].apply(len))\n df['new_col'] = ranks / counts > 0.8\n print(df.head())\n train = df.loc[df['new_col'] == False]\n test = df.loc[df['new_col'] == True]\n train = train.drop(['new_col'], axis=1)\n test = test.drop(['new_col'], axis=1)\n train.to_csv(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\Cvorm\\\\training.csv'\n , header=False, index=False)\n test.to_csv(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\Cvorm\\\\testing.csv'\n , header=False, index=False)\n\n\nsplit_data(\n 'C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\ratings.csv')\n", "step-5": "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom collections import Counter\nimport numpy as np\nimport imdb\nimport csv\nimport networkx as nx\nfrom networkx import *\ndef split_data(data):\n df = pd.read_csv(data)\n ranks = df.groupby('userId')['timestamp'].rank(method='first')\n counts = df['userId'].map(df.groupby('userId')['timestamp'].apply(len))\n # myes = (ranks / counts) > 0.8\n df['new_col'] = (ranks / counts) > 0.8\n # print(myes)\n print(df.head())\n train = df.loc[df['new_col'] == False]\n test = df.loc[df['new_col'] == True]\n\n train = train.drop(['new_col'], axis=1)\n test = test.drop(['new_col'], axis=1)\n\n train.to_csv(r'C:\\Users\\Darkmaster\\PycharmProjects\\Recommender\\Data\\Cvorm\\training.csv', header=False, index=False)\n test.to_csv(r'C:\\Users\\Darkmaster\\PycharmProjects\\Recommender\\Data\\Cvorm\\testing.csv', header=False, index=False)\n\n # print(test.head())\n\n\n # ----AND THEN SAVE THOSE AS CSV----\n\n # for row in df.index\n # print(test_train)\n # print(ranks.head())\n # print(counts.head())\n\n\n\n# def make_train_or_test_txt(ratingdata):\n# df = pd.read_csv(ratingdata)\n# users = []\n# [users.append(x) for x in df[\"userId\"] if x not in users]\n# print(users)\n# with open('Data/KGAT/train.txt', 'w') as f:\n# # writer = csv.writer(f, 
delimiter='\\t')\n# for x in users:\n# items = []\n# items = df.query('userId == {}'.format(x))[\"movieId\"]\n# items = items.values.tolist()\n# stringerbell = ''.join((str(e) + \"\\t\") for e in items)\n# print(stringerbell)\n# # writer.writerow(\"{}{}\".format(x, items))\n# # writer.writerow(str(x) + stringerbell)\n# f.write(str(x) + \"\\t\" + stringerbell + \"\\n\")\n# # print(items)\n# # for j in range(len(df)):\n# # try:\n# # getitems = [x for x in df.loc[df[\"movieId\"]]]\n# # except:\n# # continue\n# print(df.head())\n\n\n\n\n\n# make_train_or_test_txt('Data/ratings.csv')\nsplit_data('C:\\\\Users\\\\Darkmaster\\\\PycharmProjects\\\\Recommender\\\\Data\\\\ratings.csv')", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
""" Main CLI endpoint for GeoCube """ import importlib.metadata import click from click import group import geocube.cli.commands as cmd_modules from geocube import show_versions CONTEXT_SETTINGS = { "help_option_names": ["-h", "--help"], "token_normalize_func": lambda x: x.replace("-", "_"), } def check_version(ctx, _, value): """ Print current version, and check for latest version. Called via 'geocube --version' :param ctx: Application context object (click.Context) :param value: Passed in by Click :return None """ if not value or ctx.resilient_parsing: return click.echo(f"geocube v{importlib.metadata.version('geocube')}") ctx.exit() def cli_show_version(ctx, _, value): """ Print debugging version information. :param ctx: Application context object (click.Context) :param value: Passed in by Click :return None """ if not value or ctx.resilient_parsing: return show_versions() ctx.exit() @group(context_settings=CONTEXT_SETTINGS) @click.option( "-v", "--version", is_flag=True, is_eager=True, expose_value=False, callback=check_version, help="Show the current version", ) @click.option( "--show-versions", is_flag=True, is_eager=True, expose_value=False, callback=cli_show_version, help="Show debugging version information", ) def geocube(): """Top-level command and entry point into the GeoCube CLI""" def _add_subcommands(): """ Individual commands (and sub-commands) are encapsulated in separate files under /commands. Collect these command groups, and add them underneath the top-level command (geocube). """ geocube.add_command(cmd_modules.make_geocube.make_geocube) _add_subcommands()
normal
{ "blob_id": "0964121d88fad2906311de7532eac52ff784fff6", "index": 8306, "step-1": "<mask token>\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. 
Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. 
Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n", "step-3": "<mask token>\nCONTEXT_SETTINGS = {'help_option_names': ['-h', '--help'],\n 'token_normalize_func': lambda x: x.replace('-', '_')}\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. 
Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n", "step-4": "<mask token>\nimport importlib.metadata\nimport click\nfrom click import group\nimport geocube.cli.commands as cmd_modules\nfrom geocube import show_versions\nCONTEXT_SETTINGS = {'help_option_names': ['-h', '--help'],\n 'token_normalize_func': lambda x: x.replace('-', '_')}\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. 
Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n", "step-5": "\"\"\"\nMain CLI endpoint for GeoCube\n\"\"\"\nimport importlib.metadata\n\nimport click\nfrom click import group\n\nimport geocube.cli.commands as cmd_modules\nfrom geocube import show_versions\n\nCONTEXT_SETTINGS = {\n \"help_option_names\": [\"-h\", \"--help\"],\n \"token_normalize_func\": lambda x: x.replace(\"-\", \"_\"),\n}\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n\n show_versions()\n\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected](\n \"-v\",\n \"--version\",\n is_flag=True,\n is_eager=True,\n expose_value=False,\n callback=check_version,\n help=\"Show the current version\",\n)\[email protected](\n \"--show-versions\",\n is_flag=True,\n is_eager=True,\n expose_value=False,\n callback=cli_show_version,\n help=\"Show debugging version information\",\n)\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. 
Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
nome = str(input('Digite um nome completo: ')).lower() silva = 'silva' in nome if silva == True: print('Existe Silva nesse nome') else: print('Não há Silva nesse nome')
normal
{ "blob_id": "faebefcadbc184fab29deb2988089223a8f09e7e", "index": 8219, "step-1": "<mask token>\n", "step-2": "<mask token>\nif silva == True:\n print('Existe Silva nesse nome')\nelse:\n print('Não há Silva nesse nome')\n", "step-3": "nome = str(input('Digite um nome completo: ')).lower()\nsilva = 'silva' in nome\nif silva == True:\n print('Existe Silva nesse nome')\nelse:\n print('Não há Silva nesse nome')\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#!/usr/bin/env python3 from collections import deque from itertools import permutations INS_ADD = 1 INS_MULTIPLY = 2 INS_INPUT = 3 INS_OUTPUT = 4 INS_JUMP_IF_TRUE = 5 INS_JUMP_IF_FALSE = 6 INS_LESS_THAN = 7 INS_EQUALS = 8 INS_ADJUST_RELATIVE_BASE = 9 INS_DONE = 99 MODE_POSITION = 0 MODE_IMMEDIATE = 1 MODE_RELATIVE = 2 class InvalidInstructionException (Exception): def __init__(self, instruction): super().__init__("<%d>" % instruction) class InvalidModeException (Exception): pass class Computer: def __init__(self, data, inputs, memory_size=8192, interactive=True): self._memory = [0] * memory_size for i in range(len(data)): self._memory[i] = data[i] self._pc = 0 self._inputs = deque(inputs) self._outputs = [] self._relative_base = 0 self._interactive = interactive def input(self, value): self._inputs.append(value) def _parse_modes(self, instruction): i = "%.5d" % instruction return (int(i[2]), int(i[1]), int(i[0])) def _fetch(self): instruction = self._memory[self._pc] self._pc += 1 if instruction > 100: return instruction % 100, self._parse_modes(instruction) else: return instruction, (MODE_POSITION, MODE_POSITION, MODE_POSITION) def _pop(self): v = self._memory[self._pc] self._pc += 1 return v def _load(self, a, mode): if mode == MODE_IMMEDIATE: return a elif mode == MODE_POSITION: return self._memory[a] elif mode == MODE_RELATIVE: return self._memory[self._relative_base + a] else: raise InvalidModeException() def _store(self, a, mode, v): if mode == MODE_IMMEDIATE: pass if mode == MODE_POSITION: self._memory[a] = v elif mode == MODE_RELATIVE: self._memory[self._relative_base + a] = v else: raise InvalidModeException() def _add(self, modes, a, b, d): self._store(d, modes[2], self._load(a, modes[0]) + self._load(b, modes[1])) def _multiply(self, modes, a, b, d): self._store(d, modes[2], self._load(a, modes[0]) * self._load(b, modes[1])) def _input(self, modes, a): if self._interactive: self._store(a, modes[0], int(input("=> "))) else: self._store(a, modes[0], 
self._inputs.popleft()) def _output(self, modes, s): v = self._load(s, modes[0]) if self._interactive: print(v) else: self._outputs.append(v) def _jump_if_true(self, modes, a, d): if self._load(a, modes[0]) != 0: self._pc = self._load(d, modes[1]) def _jump_if_false(self, modes, a, d): if self._load(a, modes[0]) == 0: self._pc = self._load(d, modes[1]) def _less_than(self, modes, a, b, d): if self._load(a, modes[0]) < self._load(b, modes[1]): self._store(d, modes[2], 1) else: self._store(d, modes[2], 0) def _equals(self, modes, a, b, d): if self._load(a, modes[0]) == self._load(b, modes[1]): self._store(d, modes[2], 1) else: self._store(d, modes[2], 0) def _adjust_relative_base(self, modes, a): self._relative_base += self._load(a, modes[0]) def run(self, debug = False): while True: instruction, modes = self._fetch() if debug: print(instruction, modes) if instruction == INS_ADD: self._add(modes, self._pop(), self._pop(), self._pop()) elif instruction == INS_MULTIPLY: self._multiply(modes, self._pop(), self._pop(), self._pop()) elif instruction == INS_INPUT: self._input(modes, self._pop()) elif instruction == INS_OUTPUT: v = self._output(modes, self._pop()) if not self._interactive: return v elif instruction == INS_JUMP_IF_TRUE: self._jump_if_true(modes, self._pop(), self._pop()) elif instruction == INS_JUMP_IF_FALSE: self._jump_if_false(modes, self._pop(), self._pop()) elif instruction == INS_LESS_THAN: self._less_than(modes, self._pop(), self._pop(), self._pop()) elif instruction == INS_EQUALS: self._equals(modes, self._pop(), self._pop(), self._pop()) elif instruction == INS_ADJUST_RELATIVE_BASE: self._adjust_relative_base(modes, self._pop()) elif instruction == INS_DONE: return self._outputs else: raise InvalidInstructionException(instruction) PROGRAM = 
[1102,34463338,34463338,63,1007,63,34463338,63,1005,63,53,1101,0,3,1000,109,988,209,12,9,1000,209,6,209,3,203,0,1008,1000,1,63,1005,63,65,1008,1000,2,63,1005,63,904,1008,1000,0,63,1005,63,58,4,25,104,0,99,4,0,104,0,99,4,17,104,0,99,0,0,1101,0,396,1029,1101,0,356,1023,1101,401,0,1028,1101,24,0,1008,1101,33,0,1019,1101,35,0,1010,1102,359,1,1022,1102,32,1,1001,1101,37,0,1004,1101,0,31,1009,1101,0,30,1003,1101,28,0,1002,1102,1,36,1014,1102,20,1,1012,1101,21,0,1000,1101,0,22,1015,1102,23,1,1013,1102,1,1,1021,1102,1,39,1007,1102,26,1,1017,1101,0,38,1016,1101,0,437,1024,1102,432,1,1025,1101,0,421,1026,1101,0,29,1005,1101,27,0,1011,1102,1,0,1020,1101,0,25,1018,1101,0,414,1027,1102,34,1,1006,109,6,2108,33,-3,63,1005,63,201,1001,64,1,64,1105,1,203,4,187,1002,64,2,64,109,14,21108,40,40,-6,1005,1014,221,4,209,1105,1,225,1001,64,1,64,1002,64,2,64,109,-21,2102,1,3,63,1008,63,28,63,1005,63,251,4,231,1001,64,1,64,1106,0,251,1002,64,2,64,109,12,2101,0,-3,63,1008,63,21,63,1005,63,275,1001,64,1,64,1105,1,277,4,257,1002,64,2,64,109,-10,1207,1,27,63,1005,63,293,1105,1,299,4,283,1001,64,1,64,1002,64,2,64,109,9,21108,41,42,3,1005,1013,315,1105,1,321,4,305,1001,64,1,64,1002,64,2,64,109,-12,1202,6,1,63,1008,63,37,63,1005,63,347,4,327,1001,64,1,64,1105,1,347,1002,64,2,64,109,29,2105,1,-4,1105,1,365,4,353,1001,64,1,64,1002,64,2,64,109,-17,2108,32,-9,63,1005,63,387,4,371,1001,64,1,64,1105,1,387,1002,64,2,64,109,17,2106,0,1,4,393,1105,1,405,1001,64,1,64,1002,64,2,64,109,1,2106,0,-1,1001,64,1,64,1106,0,423,4,411,1002,64,2,64,109,-13,2105,1,9,4,429,1106,0,441,1001,64,1,64,1002,64,2,64,109,3,21107,42,41,-1,1005,1017,461,1001,64,1,64,1106,0,463,4,447,1002,64,2,64,109,-4,21107,43,44,1,1005,1015,481,4,469,1106,0,485,1001,64,1,64,1002,64,2,64,109,-6,21101,44,0,6,1008,1014,47,63,1005,63,505,1106,0,511,4,491,1001,64,1,64,1002,64,2,64,109,-6,1208,-1,32,63,1005,63,529,4,517,1105,1,533,1001,64,1,64,1002,64,2,64,109,11,1205,7,545,1106,0,551,4,539,1001,64,1,64,1002,64,2,64,109,11,21102,45,1,-7,1008,1017,48,6
3,1005,63,575,1001,64,1,64,1106,0,577,4,557,1002,64,2,64,109,-8,1206,5,593,1001,64,1,64,1105,1,595,4,583,1002,64,2,64,109,7,1206,-3,609,4,601,1106,0,613,1001,64,1,64,1002,64,2,64,109,-10,2101,0,-6,63,1008,63,39,63,1005,63,635,4,619,1106,0,639,1001,64,1,64,1002,64,2,64,109,-9,1208,0,39,63,1005,63,655,1106,0,661,4,645,1001,64,1,64,1002,64,2,64,109,4,2107,25,0,63,1005,63,681,1001,64,1,64,1105,1,683,4,667,1002,64,2,64,109,-5,2107,31,-2,63,1005,63,701,4,689,1106,0,705,1001,64,1,64,1002,64,2,64,109,19,1205,-1,719,4,711,1105,1,723,1001,64,1,64,1002,64,2,64,109,-17,1201,3,0,63,1008,63,24,63,1005,63,745,4,729,1106,0,749,1001,64,1,64,1002,64,2,64,109,13,21102,46,1,-3,1008,1015,46,63,1005,63,771,4,755,1105,1,775,1001,64,1,64,1002,64,2,64,109,-13,1207,4,32,63,1005,63,793,4,781,1106,0,797,1001,64,1,64,1002,64,2,64,109,7,2102,1,-9,63,1008,63,27,63,1005,63,821,1001,64,1,64,1105,1,823,4,803,1002,64,2,64,109,-18,1201,8,0,63,1008,63,25,63,1005,63,847,1001,64,1,64,1106,0,849,4,829,1002,64,2,64,109,23,21101,47,0,2,1008,1019,47,63,1005,63,871,4,855,1106,0,875,1001,64,1,64,1002,64,2,64,109,-22,1202,5,1,63,1008,63,19,63,1005,63,899,1001,64,1,64,1106,0,901,4,881,4,64,99,21102,27,1,1,21102,1,915,0,1105,1,922,21201,1,25165,1,204,1,99,109,3,1207,-2,3,63,1005,63,964,21201,-2,-1,1,21102,942,1,0,1105,1,922,22102,1,1,-1,21201,-2,-3,1,21101,0,957,0,1105,1,922,22201,1,-1,-2,1106,0,968,21201,-2,0,-2,109,-3,2105,1,0] if __name__ == "__main__": c = Computer(PROGRAM, []) c.run()
normal
{ "blob_id": "121fddf022c4eed7fd00e81edcb2df6a7a3b7510", "index": 4903, "step-1": "<mask token>\n\n\nclass Computer:\n\n def __init__(self, data, inputs, memory_size=8192, interactive=True):\n self._memory = [0] * memory_size\n for i in range(len(data)):\n self._memory[i] = data[i]\n self._pc = 0\n self._inputs = deque(inputs)\n self._outputs = []\n self._relative_base = 0\n self._interactive = interactive\n\n def input(self, value):\n self._inputs.append(value)\n\n def _parse_modes(self, instruction):\n i = '%.5d' % instruction\n return int(i[2]), int(i[1]), int(i[0])\n\n def _fetch(self):\n instruction = self._memory[self._pc]\n self._pc += 1\n if instruction > 100:\n return instruction % 100, self._parse_modes(instruction)\n else:\n return instruction, (MODE_POSITION, MODE_POSITION, MODE_POSITION)\n\n def _pop(self):\n v = self._memory[self._pc]\n self._pc += 1\n return v\n\n def _load(self, a, mode):\n if mode == MODE_IMMEDIATE:\n return a\n elif mode == MODE_POSITION:\n return self._memory[a]\n elif mode == MODE_RELATIVE:\n return self._memory[self._relative_base + a]\n else:\n raise InvalidModeException()\n\n def _store(self, a, mode, v):\n if mode == MODE_IMMEDIATE:\n pass\n if mode == MODE_POSITION:\n self._memory[a] = v\n elif mode == MODE_RELATIVE:\n self._memory[self._relative_base + a] = v\n else:\n raise InvalidModeException()\n\n def _add(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) + self._load(b,\n modes[1]))\n\n def _multiply(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) * self._load(b,\n modes[1]))\n\n def _input(self, modes, a):\n if self._interactive:\n self._store(a, modes[0], int(input('=> ')))\n else:\n self._store(a, modes[0], self._inputs.popleft())\n\n def _output(self, modes, s):\n v = self._load(s, modes[0])\n if self._interactive:\n print(v)\n else:\n self._outputs.append(v)\n\n def _jump_if_true(self, modes, a, d):\n if self._load(a, modes[0]) != 0:\n self._pc = self._load(d, 
modes[1])\n\n def _jump_if_false(self, modes, a, d):\n if self._load(a, modes[0]) == 0:\n self._pc = self._load(d, modes[1])\n\n def _less_than(self, modes, a, b, d):\n if self._load(a, modes[0]) < self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _equals(self, modes, a, b, d):\n if self._load(a, modes[0]) == self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _adjust_relative_base(self, modes, a):\n self._relative_base += self._load(a, modes[0])\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass InvalidModeException(Exception):\n pass\n\n\nclass Computer:\n\n def __init__(self, data, inputs, memory_size=8192, interactive=True):\n self._memory = [0] * memory_size\n for i in range(len(data)):\n self._memory[i] = data[i]\n self._pc = 0\n self._inputs = deque(inputs)\n self._outputs = []\n self._relative_base = 0\n self._interactive = interactive\n\n def input(self, value):\n self._inputs.append(value)\n\n def _parse_modes(self, instruction):\n i = '%.5d' % instruction\n return int(i[2]), int(i[1]), int(i[0])\n\n def _fetch(self):\n instruction = self._memory[self._pc]\n self._pc += 1\n if instruction > 100:\n return instruction % 100, self._parse_modes(instruction)\n else:\n return instruction, (MODE_POSITION, MODE_POSITION, MODE_POSITION)\n\n def _pop(self):\n v = self._memory[self._pc]\n self._pc += 1\n return v\n\n def _load(self, a, mode):\n if mode == MODE_IMMEDIATE:\n return a\n elif mode == MODE_POSITION:\n return self._memory[a]\n elif mode == MODE_RELATIVE:\n return self._memory[self._relative_base + a]\n else:\n raise InvalidModeException()\n\n def _store(self, a, mode, v):\n if mode == MODE_IMMEDIATE:\n pass\n if mode == MODE_POSITION:\n self._memory[a] = v\n elif mode == MODE_RELATIVE:\n self._memory[self._relative_base + a] = v\n else:\n raise InvalidModeException()\n\n def _add(self, modes, a, b, d):\n self._store(d, modes[2], 
self._load(a, modes[0]) + self._load(b,\n modes[1]))\n\n def _multiply(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) * self._load(b,\n modes[1]))\n\n def _input(self, modes, a):\n if self._interactive:\n self._store(a, modes[0], int(input('=> ')))\n else:\n self._store(a, modes[0], self._inputs.popleft())\n\n def _output(self, modes, s):\n v = self._load(s, modes[0])\n if self._interactive:\n print(v)\n else:\n self._outputs.append(v)\n\n def _jump_if_true(self, modes, a, d):\n if self._load(a, modes[0]) != 0:\n self._pc = self._load(d, modes[1])\n\n def _jump_if_false(self, modes, a, d):\n if self._load(a, modes[0]) == 0:\n self._pc = self._load(d, modes[1])\n\n def _less_than(self, modes, a, b, d):\n if self._load(a, modes[0]) < self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _equals(self, modes, a, b, d):\n if self._load(a, modes[0]) == self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _adjust_relative_base(self, modes, a):\n self._relative_base += self._load(a, modes[0])\n\n def run(self, debug=False):\n while True:\n instruction, modes = self._fetch()\n if debug:\n print(instruction, modes)\n if instruction == INS_ADD:\n self._add(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_MULTIPLY:\n self._multiply(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_INPUT:\n self._input(modes, self._pop())\n elif instruction == INS_OUTPUT:\n v = self._output(modes, self._pop())\n if not self._interactive:\n return v\n elif instruction == INS_JUMP_IF_TRUE:\n self._jump_if_true(modes, self._pop(), self._pop())\n elif instruction == INS_JUMP_IF_FALSE:\n self._jump_if_false(modes, self._pop(), self._pop())\n elif instruction == INS_LESS_THAN:\n self._less_than(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_EQUALS:\n self._equals(modes, self._pop(), self._pop(), self._pop())\n 
elif instruction == INS_ADJUST_RELATIVE_BASE:\n self._adjust_relative_base(modes, self._pop())\n elif instruction == INS_DONE:\n return self._outputs\n else:\n raise InvalidInstructionException(instruction)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass InvalidInstructionException(Exception):\n\n def __init__(self, instruction):\n super().__init__('<%d>' % instruction)\n\n\nclass InvalidModeException(Exception):\n pass\n\n\nclass Computer:\n\n def __init__(self, data, inputs, memory_size=8192, interactive=True):\n self._memory = [0] * memory_size\n for i in range(len(data)):\n self._memory[i] = data[i]\n self._pc = 0\n self._inputs = deque(inputs)\n self._outputs = []\n self._relative_base = 0\n self._interactive = interactive\n\n def input(self, value):\n self._inputs.append(value)\n\n def _parse_modes(self, instruction):\n i = '%.5d' % instruction\n return int(i[2]), int(i[1]), int(i[0])\n\n def _fetch(self):\n instruction = self._memory[self._pc]\n self._pc += 1\n if instruction > 100:\n return instruction % 100, self._parse_modes(instruction)\n else:\n return instruction, (MODE_POSITION, MODE_POSITION, MODE_POSITION)\n\n def _pop(self):\n v = self._memory[self._pc]\n self._pc += 1\n return v\n\n def _load(self, a, mode):\n if mode == MODE_IMMEDIATE:\n return a\n elif mode == MODE_POSITION:\n return self._memory[a]\n elif mode == MODE_RELATIVE:\n return self._memory[self._relative_base + a]\n else:\n raise InvalidModeException()\n\n def _store(self, a, mode, v):\n if mode == MODE_IMMEDIATE:\n pass\n if mode == MODE_POSITION:\n self._memory[a] = v\n elif mode == MODE_RELATIVE:\n self._memory[self._relative_base + a] = v\n else:\n raise InvalidModeException()\n\n def _add(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) + self._load(b,\n modes[1]))\n\n def _multiply(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) * self._load(b,\n modes[1]))\n\n def _input(self, modes, a):\n if self._interactive:\n 
self._store(a, modes[0], int(input('=> ')))\n else:\n self._store(a, modes[0], self._inputs.popleft())\n\n def _output(self, modes, s):\n v = self._load(s, modes[0])\n if self._interactive:\n print(v)\n else:\n self._outputs.append(v)\n\n def _jump_if_true(self, modes, a, d):\n if self._load(a, modes[0]) != 0:\n self._pc = self._load(d, modes[1])\n\n def _jump_if_false(self, modes, a, d):\n if self._load(a, modes[0]) == 0:\n self._pc = self._load(d, modes[1])\n\n def _less_than(self, modes, a, b, d):\n if self._load(a, modes[0]) < self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _equals(self, modes, a, b, d):\n if self._load(a, modes[0]) == self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _adjust_relative_base(self, modes, a):\n self._relative_base += self._load(a, modes[0])\n\n def run(self, debug=False):\n while True:\n instruction, modes = self._fetch()\n if debug:\n print(instruction, modes)\n if instruction == INS_ADD:\n self._add(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_MULTIPLY:\n self._multiply(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_INPUT:\n self._input(modes, self._pop())\n elif instruction == INS_OUTPUT:\n v = self._output(modes, self._pop())\n if not self._interactive:\n return v\n elif instruction == INS_JUMP_IF_TRUE:\n self._jump_if_true(modes, self._pop(), self._pop())\n elif instruction == INS_JUMP_IF_FALSE:\n self._jump_if_false(modes, self._pop(), self._pop())\n elif instruction == INS_LESS_THAN:\n self._less_than(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_EQUALS:\n self._equals(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_ADJUST_RELATIVE_BASE:\n self._adjust_relative_base(modes, self._pop())\n elif instruction == INS_DONE:\n return self._outputs\n else:\n raise InvalidInstructionException(instruction)\n\n\n<mask token>\n", 
"step-4": "<mask token>\n\n\nclass InvalidInstructionException(Exception):\n\n def __init__(self, instruction):\n super().__init__('<%d>' % instruction)\n\n\nclass InvalidModeException(Exception):\n pass\n\n\nclass Computer:\n\n def __init__(self, data, inputs, memory_size=8192, interactive=True):\n self._memory = [0] * memory_size\n for i in range(len(data)):\n self._memory[i] = data[i]\n self._pc = 0\n self._inputs = deque(inputs)\n self._outputs = []\n self._relative_base = 0\n self._interactive = interactive\n\n def input(self, value):\n self._inputs.append(value)\n\n def _parse_modes(self, instruction):\n i = '%.5d' % instruction\n return int(i[2]), int(i[1]), int(i[0])\n\n def _fetch(self):\n instruction = self._memory[self._pc]\n self._pc += 1\n if instruction > 100:\n return instruction % 100, self._parse_modes(instruction)\n else:\n return instruction, (MODE_POSITION, MODE_POSITION, MODE_POSITION)\n\n def _pop(self):\n v = self._memory[self._pc]\n self._pc += 1\n return v\n\n def _load(self, a, mode):\n if mode == MODE_IMMEDIATE:\n return a\n elif mode == MODE_POSITION:\n return self._memory[a]\n elif mode == MODE_RELATIVE:\n return self._memory[self._relative_base + a]\n else:\n raise InvalidModeException()\n\n def _store(self, a, mode, v):\n if mode == MODE_IMMEDIATE:\n pass\n if mode == MODE_POSITION:\n self._memory[a] = v\n elif mode == MODE_RELATIVE:\n self._memory[self._relative_base + a] = v\n else:\n raise InvalidModeException()\n\n def _add(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) + self._load(b,\n modes[1]))\n\n def _multiply(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) * self._load(b,\n modes[1]))\n\n def _input(self, modes, a):\n if self._interactive:\n self._store(a, modes[0], int(input('=> ')))\n else:\n self._store(a, modes[0], self._inputs.popleft())\n\n def _output(self, modes, s):\n v = self._load(s, modes[0])\n if self._interactive:\n print(v)\n else:\n 
self._outputs.append(v)\n\n def _jump_if_true(self, modes, a, d):\n if self._load(a, modes[0]) != 0:\n self._pc = self._load(d, modes[1])\n\n def _jump_if_false(self, modes, a, d):\n if self._load(a, modes[0]) == 0:\n self._pc = self._load(d, modes[1])\n\n def _less_than(self, modes, a, b, d):\n if self._load(a, modes[0]) < self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _equals(self, modes, a, b, d):\n if self._load(a, modes[0]) == self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _adjust_relative_base(self, modes, a):\n self._relative_base += self._load(a, modes[0])\n\n def run(self, debug=False):\n while True:\n instruction, modes = self._fetch()\n if debug:\n print(instruction, modes)\n if instruction == INS_ADD:\n self._add(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_MULTIPLY:\n self._multiply(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_INPUT:\n self._input(modes, self._pop())\n elif instruction == INS_OUTPUT:\n v = self._output(modes, self._pop())\n if not self._interactive:\n return v\n elif instruction == INS_JUMP_IF_TRUE:\n self._jump_if_true(modes, self._pop(), self._pop())\n elif instruction == INS_JUMP_IF_FALSE:\n self._jump_if_false(modes, self._pop(), self._pop())\n elif instruction == INS_LESS_THAN:\n self._less_than(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_EQUALS:\n self._equals(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_ADJUST_RELATIVE_BASE:\n self._adjust_relative_base(modes, self._pop())\n elif instruction == INS_DONE:\n return self._outputs\n else:\n raise InvalidInstructionException(instruction)\n\n\n<mask token>\nif __name__ == '__main__':\n c = Computer(PROGRAM, [])\n c.run()\n", "step-5": "#!/usr/bin/env python3\n\n\nfrom collections import deque\nfrom itertools import permutations\n\n\nINS_ADD = 1\nINS_MULTIPLY = 
2\nINS_INPUT = 3\nINS_OUTPUT = 4\nINS_JUMP_IF_TRUE = 5\nINS_JUMP_IF_FALSE = 6\nINS_LESS_THAN = 7\nINS_EQUALS = 8\nINS_ADJUST_RELATIVE_BASE = 9\nINS_DONE = 99\n\nMODE_POSITION = 0\nMODE_IMMEDIATE = 1\nMODE_RELATIVE = 2\n\n\nclass InvalidInstructionException (Exception):\n def __init__(self, instruction):\n super().__init__(\"<%d>\" % instruction)\n\n\nclass InvalidModeException (Exception):\n pass\n\n\nclass Computer:\n\n def __init__(self, data, inputs, memory_size=8192, interactive=True):\n self._memory = [0] * memory_size\n for i in range(len(data)):\n self._memory[i] = data[i]\n self._pc = 0\n self._inputs = deque(inputs)\n self._outputs = []\n self._relative_base = 0\n self._interactive = interactive\n\n def input(self, value):\n self._inputs.append(value)\n \n def _parse_modes(self, instruction):\n i = \"%.5d\" % instruction\n return (int(i[2]), int(i[1]), int(i[0]))\n \n def _fetch(self):\n instruction = self._memory[self._pc]\n self._pc += 1\n if instruction > 100:\n return instruction % 100, self._parse_modes(instruction)\n else:\n return instruction, (MODE_POSITION, MODE_POSITION, MODE_POSITION)\n\n def _pop(self):\n v = self._memory[self._pc]\n self._pc += 1\n return v\n \n def _load(self, a, mode):\n if mode == MODE_IMMEDIATE:\n return a\n elif mode == MODE_POSITION:\n return self._memory[a]\n elif mode == MODE_RELATIVE:\n return self._memory[self._relative_base + a]\n else:\n raise InvalidModeException()\n\n def _store(self, a, mode, v):\n if mode == MODE_IMMEDIATE:\n pass\n if mode == MODE_POSITION:\n self._memory[a] = v\n elif mode == MODE_RELATIVE:\n self._memory[self._relative_base + a] = v\n else:\n raise InvalidModeException()\n \n def _add(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) + self._load(b, modes[1]))\n \n def _multiply(self, modes, a, b, d):\n self._store(d, modes[2], self._load(a, modes[0]) * self._load(b, modes[1]))\n\n def _input(self, modes, a):\n if self._interactive:\n self._store(a, modes[0], 
int(input(\"=> \")))\n else:\n self._store(a, modes[0], self._inputs.popleft())\n\n def _output(self, modes, s):\n v = self._load(s, modes[0])\n if self._interactive:\n print(v)\n else:\n self._outputs.append(v)\n \n def _jump_if_true(self, modes, a, d):\n if self._load(a, modes[0]) != 0:\n self._pc = self._load(d, modes[1])\n\n def _jump_if_false(self, modes, a, d):\n if self._load(a, modes[0]) == 0:\n self._pc = self._load(d, modes[1])\n\n def _less_than(self, modes, a, b, d):\n if self._load(a, modes[0]) < self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _equals(self, modes, a, b, d):\n if self._load(a, modes[0]) == self._load(b, modes[1]):\n self._store(d, modes[2], 1)\n else:\n self._store(d, modes[2], 0)\n\n def _adjust_relative_base(self, modes, a):\n self._relative_base += self._load(a, modes[0])\n \n def run(self, debug = False):\n while True:\n instruction, modes = self._fetch()\n if debug:\n print(instruction, modes)\n if instruction == INS_ADD:\n self._add(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_MULTIPLY:\n self._multiply(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_INPUT:\n self._input(modes, self._pop())\n elif instruction == INS_OUTPUT:\n v = self._output(modes, self._pop())\n if not self._interactive:\n return v\n elif instruction == INS_JUMP_IF_TRUE:\n self._jump_if_true(modes, self._pop(), self._pop())\n elif instruction == INS_JUMP_IF_FALSE:\n self._jump_if_false(modes, self._pop(), self._pop())\n elif instruction == INS_LESS_THAN:\n self._less_than(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_EQUALS:\n self._equals(modes, self._pop(), self._pop(), self._pop())\n elif instruction == INS_ADJUST_RELATIVE_BASE:\n self._adjust_relative_base(modes, self._pop())\n elif instruction == INS_DONE:\n return self._outputs\n else:\n raise InvalidInstructionException(instruction)\n\n\nPROGRAM = 
[1102,34463338,34463338,63,1007,63,34463338,63,1005,63,53,1101,0,3,1000,109,988,209,12,9,1000,209,6,209,3,203,0,1008,1000,1,63,1005,63,65,1008,1000,2,63,1005,63,904,1008,1000,0,63,1005,63,58,4,25,104,0,99,4,0,104,0,99,4,17,104,0,99,0,0,1101,0,396,1029,1101,0,356,1023,1101,401,0,1028,1101,24,0,1008,1101,33,0,1019,1101,35,0,1010,1102,359,1,1022,1102,32,1,1001,1101,37,0,1004,1101,0,31,1009,1101,0,30,1003,1101,28,0,1002,1102,1,36,1014,1102,20,1,1012,1101,21,0,1000,1101,0,22,1015,1102,23,1,1013,1102,1,1,1021,1102,1,39,1007,1102,26,1,1017,1101,0,38,1016,1101,0,437,1024,1102,432,1,1025,1101,0,421,1026,1101,0,29,1005,1101,27,0,1011,1102,1,0,1020,1101,0,25,1018,1101,0,414,1027,1102,34,1,1006,109,6,2108,33,-3,63,1005,63,201,1001,64,1,64,1105,1,203,4,187,1002,64,2,64,109,14,21108,40,40,-6,1005,1014,221,4,209,1105,1,225,1001,64,1,64,1002,64,2,64,109,-21,2102,1,3,63,1008,63,28,63,1005,63,251,4,231,1001,64,1,64,1106,0,251,1002,64,2,64,109,12,2101,0,-3,63,1008,63,21,63,1005,63,275,1001,64,1,64,1105,1,277,4,257,1002,64,2,64,109,-10,1207,1,27,63,1005,63,293,1105,1,299,4,283,1001,64,1,64,1002,64,2,64,109,9,21108,41,42,3,1005,1013,315,1105,1,321,4,305,1001,64,1,64,1002,64,2,64,109,-12,1202,6,1,63,1008,63,37,63,1005,63,347,4,327,1001,64,1,64,1105,1,347,1002,64,2,64,109,29,2105,1,-4,1105,1,365,4,353,1001,64,1,64,1002,64,2,64,109,-17,2108,32,-9,63,1005,63,387,4,371,1001,64,1,64,1105,1,387,1002,64,2,64,109,17,2106,0,1,4,393,1105,1,405,1001,64,1,64,1002,64,2,64,109,1,2106,0,-1,1001,64,1,64,1106,0,423,4,411,1002,64,2,64,109,-13,2105,1,9,4,429,1106,0,441,1001,64,1,64,1002,64,2,64,109,3,21107,42,41,-1,1005,1017,461,1001,64,1,64,1106,0,463,4,447,1002,64,2,64,109,-4,21107,43,44,1,1005,1015,481,4,469,1106,0,485,1001,64,1,64,1002,64,2,64,109,-6,21101,44,0,6,1008,1014,47,63,1005,63,505,1106,0,511,4,491,1001,64,1,64,1002,64,2,64,109,-6,1208,-1,32,63,1005,63,529,4,517,1105,1,533,1001,64,1,64,1002,64,2,64,109,11,1205,7,545,1106,0,551,4,539,1001,64,1,64,1002,64,2,64,109,11,21102,45,1,-7,1008,1017,48,6
3,1005,63,575,1001,64,1,64,1106,0,577,4,557,1002,64,2,64,109,-8,1206,5,593,1001,64,1,64,1105,1,595,4,583,1002,64,2,64,109,7,1206,-3,609,4,601,1106,0,613,1001,64,1,64,1002,64,2,64,109,-10,2101,0,-6,63,1008,63,39,63,1005,63,635,4,619,1106,0,639,1001,64,1,64,1002,64,2,64,109,-9,1208,0,39,63,1005,63,655,1106,0,661,4,645,1001,64,1,64,1002,64,2,64,109,4,2107,25,0,63,1005,63,681,1001,64,1,64,1105,1,683,4,667,1002,64,2,64,109,-5,2107,31,-2,63,1005,63,701,4,689,1106,0,705,1001,64,1,64,1002,64,2,64,109,19,1205,-1,719,4,711,1105,1,723,1001,64,1,64,1002,64,2,64,109,-17,1201,3,0,63,1008,63,24,63,1005,63,745,4,729,1106,0,749,1001,64,1,64,1002,64,2,64,109,13,21102,46,1,-3,1008,1015,46,63,1005,63,771,4,755,1105,1,775,1001,64,1,64,1002,64,2,64,109,-13,1207,4,32,63,1005,63,793,4,781,1106,0,797,1001,64,1,64,1002,64,2,64,109,7,2102,1,-9,63,1008,63,27,63,1005,63,821,1001,64,1,64,1105,1,823,4,803,1002,64,2,64,109,-18,1201,8,0,63,1008,63,25,63,1005,63,847,1001,64,1,64,1106,0,849,4,829,1002,64,2,64,109,23,21101,47,0,2,1008,1019,47,63,1005,63,871,4,855,1106,0,875,1001,64,1,64,1002,64,2,64,109,-22,1202,5,1,63,1008,63,19,63,1005,63,899,1001,64,1,64,1106,0,901,4,881,4,64,99,21102,27,1,1,21102,1,915,0,1105,1,922,21201,1,25165,1,204,1,99,109,3,1207,-2,3,63,1005,63,964,21201,-2,-1,1,21102,942,1,0,1105,1,922,22102,1,1,-1,21201,-2,-3,1,21101,0,957,0,1105,1,922,22201,1,-1,-2,1106,0,968,21201,-2,0,-2,109,-3,2105,1,0]\n\n\nif __name__ == \"__main__\":\n c = Computer(PROGRAM, [])\n c.run()\n", "step-ids": [ 17, 19, 21, 22, 25 ] }
[ 17, 19, 21, 22, 25 ]
import re from pathlib import Path RAW_DUMP_XML = Path("raw_data/Wikipedia.xml") def count_regexp(): """Counts the occurences of the regular expressions you will write. """ # Here's an example regular expression that roughly matches a valid email address. # The ones you write below should be shorter than this email = re.compile("[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\.[a-zA-Z]{2,5}") ###### Write below ######### subheading = re.compile("\=\=+.*\=\=+") link_to_subheading = re.compile("\[\[[\w\'*\-*\:*\(*\)*\_*\s*]+[#][\s*\w\\'*\-*\:*\(*\)*\_*s*]+\|*") doi_citation = re.compile("\{\{[c][ite](?!{{).*[dD][oO][iI]\s*[:|,=\/]*\s*[0-9]+\.[0-9]+.*\}\}") ###### End of your work ######### patterns = { "emails": email, "subheadings": subheading, "links to subheadings": link_to_subheading, "citations with DOI numbers": doi_citation, } with open(RAW_DUMP_XML, encoding="utf-8") as f: dump_text = f.read() for name, pattern in patterns.items(): if pattern is None: continue matches = pattern.findall(dump_text) count = len(matches) example_matches = [matches[i * (count // 5)] for i in range(5)] print("Found {} occurences of {}".format(count, name)) print("Here are examples:") print("\n".join(example_matches)) print("\n") if __name__ == "__main__": count_regexp()
normal
{ "blob_id": "8a4269f2094fa8ab8f6a93e653183dafb141232e", "index": 5717, "step-1": "<mask token>\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern 
in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n", "step-3": "<mask token>\nRAW_DUMP_XML = Path('raw_data/Wikipedia.xml')\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n \"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n", "step-4": "import re\nfrom pathlib import Path\nRAW_DUMP_XML = Path('raw_data/Wikipedia.xml')\n\n\ndef count_regexp():\n \"\"\"Counts the occurences of the regular expressions you will write.\n \"\"\"\n email = re.compile('[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\\\.[a-zA-Z]{2,5}')\n subheading = re.compile('\\\\=\\\\=+.*\\\\=\\\\=+')\n link_to_subheading = re.compile(\n 
\"\\\\[\\\\[[\\\\w'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*\\\\s*]+[#][\\\\s*\\\\w\\\\'*\\\\-*\\\\:*\\\\(*\\\\)*\\\\_*s*]+\\\\|*\"\n )\n doi_citation = re.compile(\n '\\\\{\\\\{[c][ite](?!{{).*[dD][oO][iI]\\\\s*[:|,=\\\\/]*\\\\s*[0-9]+\\\\.[0-9]+.*\\\\}\\\\}'\n )\n patterns = {'emails': email, 'subheadings': subheading,\n 'links to subheadings': link_to_subheading,\n 'citations with DOI numbers': doi_citation}\n with open(RAW_DUMP_XML, encoding='utf-8') as f:\n dump_text = f.read()\n for name, pattern in patterns.items():\n if pattern is None:\n continue\n matches = pattern.findall(dump_text)\n count = len(matches)\n example_matches = [matches[i * (count // 5)] for i in range(5)]\n print('Found {} occurences of {}'.format(count, name))\n print('Here are examples:')\n print('\\n'.join(example_matches))\n print('\\n')\n\n\nif __name__ == '__main__':\n count_regexp()\n", "step-5": "import re\r\nfrom pathlib import Path\r\n\r\nRAW_DUMP_XML = Path(\"raw_data/Wikipedia.xml\")\r\n\r\n\r\ndef count_regexp():\r\n \"\"\"Counts the occurences of the regular expressions you will write.\r\n \"\"\"\r\n # Here's an example regular expression that roughly matches a valid email address.\r\n # The ones you write below should be shorter than this\r\n email = re.compile(\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\\.[a-zA-Z]{2,5}\")\r\n\r\n ###### Write below #########\r\n subheading = re.compile(\"\\=\\=+.*\\=\\=+\")\r\n link_to_subheading = re.compile(\"\\[\\[[\\w\\'*\\-*\\:*\\(*\\)*\\_*\\s*]+[#][\\s*\\w\\\\'*\\-*\\:*\\(*\\)*\\_*s*]+\\|*\")\r\n doi_citation = re.compile(\"\\{\\{[c][ite](?!{{).*[dD][oO][iI]\\s*[:|,=\\/]*\\s*[0-9]+\\.[0-9]+.*\\}\\}\")\r\n ###### End of your work #########\r\n\r\n patterns = {\r\n \"emails\": email,\r\n \"subheadings\": subheading,\r\n \"links to subheadings\": link_to_subheading,\r\n \"citations with DOI numbers\": doi_citation,\r\n }\r\n\r\n with open(RAW_DUMP_XML, encoding=\"utf-8\") as f:\r\n dump_text = f.read()\r\n for name, pattern in patterns.items():\r\n if pattern 
is None:\r\n continue\r\n matches = pattern.findall(dump_text)\r\n count = len(matches)\r\n\r\n example_matches = [matches[i * (count // 5)] for i in range(5)]\r\n\r\n print(\"Found {} occurences of {}\".format(count, name))\r\n print(\"Here are examples:\")\r\n print(\"\\n\".join(example_matches))\r\n print(\"\\n\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n count_regexp()\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/env python3 from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4 from ev3dev2.sensor.lego import ColorSensor, UltrasonicSensor from ev3dev2.power import PowerSupply # initiate color sensors # the colour sensor needs to be between 1-2 cm away from the surface you are trying to measure. (color mode) # TODO confirm the mapping colorSensor_lt = ColorSensor(INPUT_4) colorSensor_rt = ColorSensor(INPUT_1) ultrasonicSensor = UltrasonicSensor(INPUT_2) # COL-REFLECT COL-AMBIENT COL-COLOR RGB-RAW colorSensor_mode_default = "COL-COLOR" colorSensor_lt.mode="COL-COLOR" colorSensor_rt.mode="COL-COLOR" ultrasonicSensor.mode="US-DIST-CM" powerSupply = PowerSupply() def getColorString(color_reading): if(color_reading==1): return "black" elif(color_reading==2): #return "blue" return "white" elif(color_reading==3): return "green" elif(color_reading==4): #return "yellow" return "white" elif(color_reading==5): return "red" elif(color_reading==6): return "white" elif(color_reading==7): return "brown" return str(color_reading) def getColorReadingInString(sensor_positon): if(sensor_positon=="left"): return getColorString(colorSensor_lt.value()) if(sensor_positon=="right"): return getColorString(colorSensor_rt.value())
normal
{ "blob_id": "84a13e3dea885d6c4a5f195dfac51c7110102fc2", "index": 6729, "step-1": "<mask token>\n\n\ndef getColorString(color_reading):\n if color_reading == 1:\n return 'black'\n elif color_reading == 2:\n return 'white'\n elif color_reading == 3:\n return 'green'\n elif color_reading == 4:\n return 'white'\n elif color_reading == 5:\n return 'red'\n elif color_reading == 6:\n return 'white'\n elif color_reading == 7:\n return 'brown'\n return str(color_reading)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef getColorString(color_reading):\n if color_reading == 1:\n return 'black'\n elif color_reading == 2:\n return 'white'\n elif color_reading == 3:\n return 'green'\n elif color_reading == 4:\n return 'white'\n elif color_reading == 5:\n return 'red'\n elif color_reading == 6:\n return 'white'\n elif color_reading == 7:\n return 'brown'\n return str(color_reading)\n\n\ndef getColorReadingInString(sensor_positon):\n if sensor_positon == 'left':\n return getColorString(colorSensor_lt.value())\n if sensor_positon == 'right':\n return getColorString(colorSensor_rt.value())\n", "step-3": "<mask token>\ncolorSensor_lt = ColorSensor(INPUT_4)\ncolorSensor_rt = ColorSensor(INPUT_1)\nultrasonicSensor = UltrasonicSensor(INPUT_2)\ncolorSensor_mode_default = 'COL-COLOR'\ncolorSensor_lt.mode = 'COL-COLOR'\ncolorSensor_rt.mode = 'COL-COLOR'\nultrasonicSensor.mode = 'US-DIST-CM'\npowerSupply = PowerSupply()\n\n\ndef getColorString(color_reading):\n if color_reading == 1:\n return 'black'\n elif color_reading == 2:\n return 'white'\n elif color_reading == 3:\n return 'green'\n elif color_reading == 4:\n return 'white'\n elif color_reading == 5:\n return 'red'\n elif color_reading == 6:\n return 'white'\n elif color_reading == 7:\n return 'brown'\n return str(color_reading)\n\n\ndef getColorReadingInString(sensor_positon):\n if sensor_positon == 'left':\n return getColorString(colorSensor_lt.value())\n if sensor_positon == 'right':\n return 
getColorString(colorSensor_rt.value())\n", "step-4": "from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4\nfrom ev3dev2.sensor.lego import ColorSensor, UltrasonicSensor\nfrom ev3dev2.power import PowerSupply\ncolorSensor_lt = ColorSensor(INPUT_4)\ncolorSensor_rt = ColorSensor(INPUT_1)\nultrasonicSensor = UltrasonicSensor(INPUT_2)\ncolorSensor_mode_default = 'COL-COLOR'\ncolorSensor_lt.mode = 'COL-COLOR'\ncolorSensor_rt.mode = 'COL-COLOR'\nultrasonicSensor.mode = 'US-DIST-CM'\npowerSupply = PowerSupply()\n\n\ndef getColorString(color_reading):\n if color_reading == 1:\n return 'black'\n elif color_reading == 2:\n return 'white'\n elif color_reading == 3:\n return 'green'\n elif color_reading == 4:\n return 'white'\n elif color_reading == 5:\n return 'red'\n elif color_reading == 6:\n return 'white'\n elif color_reading == 7:\n return 'brown'\n return str(color_reading)\n\n\ndef getColorReadingInString(sensor_positon):\n if sensor_positon == 'left':\n return getColorString(colorSensor_lt.value())\n if sensor_positon == 'right':\n return getColorString(colorSensor_rt.value())\n", "step-5": "#!/usr/bin/env python3\nfrom ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4\nfrom ev3dev2.sensor.lego import ColorSensor, UltrasonicSensor\nfrom ev3dev2.power import PowerSupply\n\n# initiate color sensors\n# the colour sensor needs to be between 1-2 cm away from the surface you are trying to measure. 
(color mode)\n# TODO confirm the mapping\ncolorSensor_lt = ColorSensor(INPUT_4)\ncolorSensor_rt = ColorSensor(INPUT_1)\nultrasonicSensor = UltrasonicSensor(INPUT_2)\n\n# COL-REFLECT COL-AMBIENT COL-COLOR RGB-RAW\ncolorSensor_mode_default = \"COL-COLOR\"\ncolorSensor_lt.mode=\"COL-COLOR\"\ncolorSensor_rt.mode=\"COL-COLOR\"\nultrasonicSensor.mode=\"US-DIST-CM\"\n\npowerSupply = PowerSupply()\n\ndef getColorString(color_reading):\n if(color_reading==1):\n return \"black\"\n elif(color_reading==2):\n #return \"blue\"\n return \"white\"\n elif(color_reading==3):\n return \"green\"\n elif(color_reading==4):\n #return \"yellow\"\n return \"white\"\n elif(color_reading==5):\n return \"red\"\n elif(color_reading==6):\n return \"white\"\n elif(color_reading==7):\n return \"brown\"\n return str(color_reading)\n\ndef getColorReadingInString(sensor_positon):\n if(sensor_positon==\"left\"):\n return getColorString(colorSensor_lt.value())\n if(sensor_positon==\"right\"):\n return getColorString(colorSensor_rt.value())\n\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import cv2 import os """ 视频场景拼接 """ stich_path="stichImage\\" def read_video(filename): ''' 将视频每秒的内容提取出来 :param filename: 视频文件路径 :return: 视频文件名,用来拼接 ''' cap=cv2.VideoCapture(filename) rate = cap.get(cv2.CAP_PROP_FPS) count=0 success, frame = cap.read() imageCount=0 while success: success, frame = cap.read() count+=1 if count>=rate: if not os.path.exists(stich_path): os.mkdir(stich_path) (shotname, extension)=os.path.splitext(filename) shotname=shotname.split('\\')[len(shotname.split('\\'))-1] if not os.path.exists(stich_path+shotname): os.mkdir(stich_path+shotname) # frame=cv2.resize(frame,(960,544)) cv2.imencode(".jpg", frame)[1].tofile( stich_path+shotname+'\\'+str(imageCount)+'.jpg') imageCount+=1 count=0 stitcher_image(shotname) def stitcher_image(shotname): """ 使用OpenCV的stitcher进行拼接 ****需要OpenCV 3.3.0**** OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615 :param shotname: """ imgs=[] for file in os.listdir(stich_path+shotname): imgs.append(cv2.imread(stich_path+shotname+'\\'+file)) stitcher = cv2.createStitcher(False) result = stitcher.stitch(imgs) cv2.imwrite(stich_path+shotname+'\\'+"stich_result.jpg", result[1]) def read_file_list(path): if os.path.isdir(path): pathlist=os.listdir(path) for file in pathlist: read_video(path+'\\'+file) # read_video('E:\\2.mp4')
normal
{ "blob_id": "a8506420b1bc558fa953f0cec3f8c16beaf44909", "index": 9886, "step-1": "<mask token>\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', 
frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n", "step-3": "<mask token>\nstich_path = 'stichImage\\\\'\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = 
stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n", "step-4": "import cv2\nimport os\n<mask token>\nstich_path = 'stichImage\\\\'\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n", "step-5": "import cv2\nimport os\n\"\"\"\n视频场景拼接\n\"\"\"\nstich_path=\"stichImage\\\\\"\n\ndef read_video(filename):\n '''\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n '''\n cap=cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count=0\n 
success, frame = cap.read()\n imageCount=0\n while success:\n success, frame = cap.read()\n count+=1\n if count>=rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n (shotname, extension)=os.path.splitext(filename)\n shotname=shotname.split('\\\\')[len(shotname.split('\\\\'))-1]\n if not os.path.exists(stich_path+shotname):\n os.mkdir(stich_path+shotname)\n # frame=cv2.resize(frame,(960,544))\n cv2.imencode(\".jpg\", frame)[1].tofile(\n stich_path+shotname+'\\\\'+str(imageCount)+'.jpg')\n imageCount+=1\n count=0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs=[]\n for file in os.listdir(stich_path+shotname):\n imgs.append(cv2.imread(stich_path+shotname+'\\\\'+file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path+shotname+'\\\\'+\"stich_result.jpg\", result[1])\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist=os.listdir(path)\n for file in pathlist:\n read_video(path+'\\\\'+file)\n\n\n\n# read_video('E:\\\\2.mp4')\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
v0 = 5 g = 9.81 t = 0.6 y = v0 * t - 0.5 * g * t ** 2 print(y)
normal
{ "blob_id": "378032a8d02bc49e5ed8ebccbeddfbb281c2cbd7", "index": 6231, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(y)\n", "step-3": "v0 = 5\ng = 9.81\nt = 0.6\ny = v0 * t - 0.5 * g * t ** 2\nprint(y)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import os import shutil from tqdm import tqdm from pathlib import Path from eval_mead import PERCENT DATAPATH = '../../../data/test' # MEAD_DIR = 'mead' MEAD_DIR = os.path.abspath('mead') MEAD_DATA_PATH = f'{MEAD_DIR}/data' MEAD_BIN = f'{MEAD_DIR}/bin' MEAD_LIB = f'{MEAD_DIR}/lib' MEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting' MEAD_DID = f'{MEAD_DIR}/did' TARGET = 'MEAD_TEST' DATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET) parse = True if os.path.exists(DATA_DIR): override = input('Data exist, override (delete and re-parse)? (Y/n): ') if override.lower() == 'y': shutil.rmtree(DATA_DIR) else: parse = False os.makedirs(DATA_DIR, exist_ok=True) cluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster') config_file = os.path.join(DATA_DIR, 'MEAD_TEST.config') CONFIG = f"""<?xml version='1.0' encoding='utf-8'?> <MEAD-CONFIG LANG="ENG" TARGET="MEAD_TEST" CLUSTER-PATH="{DATA_DIR}" DOC-DIRECTORY="{DATA_DIR}/docsent"> <FEATURE-SET BASE-DIRECTORY="{DATA_DIR}/feature"> <FEATURE NAME="Position" SCRIPT="{MEAD_BIN}/feature-scripts/Position.pl" /> <FEATURE NAME="Length" SCRIPT="{MEAD_BIN}/feature-scripts/Length.pl" /> <FEATURE NAME="Centroid" SCRIPT="{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG" /> </FEATURE-SET> <CLASSIFIER COMMAND-LINE="{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0" SYSTEM="MEADORIG" /> <COMPRESSION BASIS="sentences" PERCENT="1" /> </MEAD-CONFIG> """ if parse: ### Get raw text ### with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream: raw_papers = stream.readlines() papers = [paper.strip().split('##SENT##') for paper in raw_papers] # Setting Env. Var. 
with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r') as stream: print('Make sure you have change the following line to absolute path to', os.path.abspath(MEAD_DID)) print('line 18 of', os.path.join( MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm')) print(stream.readlines()[17]) with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream: print('Make sure you have change the following line to absolute path to', os.path.abspath(MEAD_DIR)) print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm')) print(stream.readlines()[30]) print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS)) os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS)) os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS) # Write raw text, cluster file # This stuff should be generated by text2cluster.pl # cluster_lines = [] # cluster_lines.append("<?xml version = '1.0' encoding='utf-8'?>\n") # cluster_lines.append("<CLUSTER LANG='ENG'>\n") print('Converting src to raw text...') for i, paper in tqdm(enumerate(papers), total=len(papers)): # did = f'raw_text_{i+1}.txt' did = f'{i+1}' text_file = os.path.join(DATA_DIR, did) with open(text_file, 'w') as stream: # make sure the sent split are the same as our annotation stream.write('\n'.join(paper)) # delete </ pattern or XML might break # os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/<\///g"') # https://stackoverflow.com/questions/8914435/awk-sed-how-to-remove-parentheses-in-simple-text-file # os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/[><]//g"') # https://validator.w3.org/feed/docs/error/SAXError.html # https://www.w3.org/TR/REC-xml/#dt-chardata print('Clean up stuff that might influence XML parsing...') os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/</&lt;/g"') os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/&/&amp;/g"') os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/>/&gt;/g"') # cluster_lines.append(f"\t<D DID='{did}' />\n") # 
cluster_lines.append('</CLUSTER>\n') # Get docsent # with open(cluster_file, 'w') as stream: # stream.writelines(cluster_lines) # Path(cluster_file).touch() print('Create cluster and docsent files...') os.system( f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}') if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0: print( 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl') print("Currently, it has bug and can't create file") # Run config # with open(config_file, 'w') as stream: # stream.write(CONFIG) # extract_file = os.path.join(DATA_DIR, f'{TARGET}.extract') # os.system( # f'cat {config_file} | {MEAD_BIN}/driver.pl > {extract_file}') # https://askubuntu.com/questions/20414/find-and-replace-text-within-a-file-using-commands os.system( f'find {DATA_DIR} -name "*.cluster" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"') os.system( f'find {DATA_DIR} -name "*.docsent" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"') OUTPUT_PATH = '../output' OUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead') if os.path.exists(OUTPUT_DIR): override = input('Result exist, do you want to re-run? (Y/n): ') if override.lower() == 'y': shutil.rmtree(OUTPUT_DIR) os.makedirs(OUTPUT_DIR, exist_ok=True) summary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary') extract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract') # compression basis is "sentence", and give PERCENT% summary shared_parameters = f'-sentences -percent {PERCENT}' # os.system( # f'perl {MEAD_BIN}/mead.pl {shared_parameters} -summary -output {summary_file} {TARGET}') os.system( f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}')
normal
{ "blob_id": "887ae9b7c629be679bf4f5fb4311c31bff605c73", "index": 8874, "step-1": "<mask token>\n", "step-2": "<mask token>\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\n<mask token>\nif parse:\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n papers = [paper.strip().split('##SENT##') for paper in raw_papers]\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'\n ) as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,\n 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), total=len(papers)):\n did = f'{i + 1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n stream.write('\\n'.join(paper))\n print('Clean up stuff that might influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</&lt;/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&amp;/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/>/&gt;/g\"')\n print('Create cluster and docsent files...')\n os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n if os.system(f'mv 
{DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'\n )\n print(\"Currently, it has bug and can't create file\")\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n<mask token>\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\n<mask token>\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'\n )\n", "step-3": "<mask token>\nDATAPATH = '../../../data/test'\nMEAD_DIR = os.path.abspath('mead')\nMEAD_DATA_PATH = f'{MEAD_DIR}/data'\nMEAD_BIN = f'{MEAD_DIR}/bin'\nMEAD_LIB = f'{MEAD_DIR}/lib'\nMEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'\nMEAD_DID = f'{MEAD_DIR}/did'\nTARGET = 'MEAD_TEST'\nDATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)\nparse = True\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? 
(Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\ncluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')\nconfig_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')\nCONFIG = f\"\"\"<?xml version='1.0' encoding='utf-8'?>\n<MEAD-CONFIG LANG=\"ENG\" TARGET=\"MEAD_TEST\" CLUSTER-PATH=\"{DATA_DIR}\" DOC-DIRECTORY=\"{DATA_DIR}/docsent\">\n<FEATURE-SET BASE-DIRECTORY=\"{DATA_DIR}/feature\">\n<FEATURE NAME=\"Position\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Position.pl\" />\n<FEATURE NAME=\"Length\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Length.pl\" />\n<FEATURE NAME=\"Centroid\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG\" />\n</FEATURE-SET>\n<CLASSIFIER COMMAND-LINE=\"{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0\" SYSTEM=\"MEADORIG\" />\n<COMPRESSION BASIS=\"sentences\" PERCENT=\"1\" />\n</MEAD-CONFIG>\n\"\"\"\nif parse:\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n papers = [paper.strip().split('##SENT##') for paper in raw_papers]\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'\n ) as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,\n 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), 
total=len(papers)):\n did = f'{i + 1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n stream.write('\\n'.join(paper))\n print('Clean up stuff that might influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</&lt;/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&amp;/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/>/&gt;/g\"')\n print('Create cluster and docsent files...')\n os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'\n )\n print(\"Currently, it has bug and can't create file\")\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\nOUTPUT_PATH = '../output'\nOUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? 
(Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\nsummary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')\nextract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')\nshared_parameters = f'-sentences -percent {PERCENT}'\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'\n )\n", "step-4": "import os\nimport shutil\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom eval_mead import PERCENT\nDATAPATH = '../../../data/test'\nMEAD_DIR = os.path.abspath('mead')\nMEAD_DATA_PATH = f'{MEAD_DIR}/data'\nMEAD_BIN = f'{MEAD_DIR}/bin'\nMEAD_LIB = f'{MEAD_DIR}/lib'\nMEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'\nMEAD_DID = f'{MEAD_DIR}/did'\nTARGET = 'MEAD_TEST'\nDATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)\nparse = True\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\ncluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')\nconfig_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')\nCONFIG = f\"\"\"<?xml version='1.0' encoding='utf-8'?>\n<MEAD-CONFIG LANG=\"ENG\" TARGET=\"MEAD_TEST\" CLUSTER-PATH=\"{DATA_DIR}\" DOC-DIRECTORY=\"{DATA_DIR}/docsent\">\n<FEATURE-SET BASE-DIRECTORY=\"{DATA_DIR}/feature\">\n<FEATURE NAME=\"Position\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Position.pl\" />\n<FEATURE NAME=\"Length\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Length.pl\" />\n<FEATURE NAME=\"Centroid\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG\" />\n</FEATURE-SET>\n<CLASSIFIER COMMAND-LINE=\"{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0\" SYSTEM=\"MEADORIG\" />\n<COMPRESSION BASIS=\"sentences\" PERCENT=\"1\" />\n</MEAD-CONFIG>\n\"\"\"\nif parse:\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n papers = 
[paper.strip().split('##SENT##') for paper in raw_papers]\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'\n ) as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,\n 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), total=len(papers)):\n did = f'{i + 1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n stream.write('\\n'.join(paper))\n print('Clean up stuff that might influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</&lt;/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&amp;/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/>/&gt;/g\"')\n print('Create cluster and docsent files...')\n os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'\n )\n print(\"Currently, it has bug and can't create file\")\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml 
version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\nOUTPUT_PATH = '../output'\nOUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\nsummary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')\nextract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')\nshared_parameters = f'-sentences -percent {PERCENT}'\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'\n )\n", "step-5": "import os\nimport shutil\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom eval_mead import PERCENT\n\nDATAPATH = '../../../data/test'\n# MEAD_DIR = 'mead'\nMEAD_DIR = os.path.abspath('mead')\nMEAD_DATA_PATH = f'{MEAD_DIR}/data'\nMEAD_BIN = f'{MEAD_DIR}/bin'\nMEAD_LIB = f'{MEAD_DIR}/lib'\nMEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'\nMEAD_DID = f'{MEAD_DIR}/did'\nTARGET = 'MEAD_TEST'\n\n\nDATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)\nparse = True\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? 
(Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\n\ncluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')\nconfig_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')\n\nCONFIG = f\"\"\"<?xml version='1.0' encoding='utf-8'?>\n<MEAD-CONFIG LANG=\"ENG\" TARGET=\"MEAD_TEST\" CLUSTER-PATH=\"{DATA_DIR}\" DOC-DIRECTORY=\"{DATA_DIR}/docsent\">\n<FEATURE-SET BASE-DIRECTORY=\"{DATA_DIR}/feature\">\n<FEATURE NAME=\"Position\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Position.pl\" />\n<FEATURE NAME=\"Length\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Length.pl\" />\n<FEATURE NAME=\"Centroid\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG\" />\n</FEATURE-SET>\n<CLASSIFIER COMMAND-LINE=\"{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0\" SYSTEM=\"MEADORIG\" />\n<COMPRESSION BASIS=\"sentences\" PERCENT=\"1\" />\n</MEAD-CONFIG>\n\"\"\"\n\nif parse:\n\n ### Get raw text ###\n\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n\n papers = [paper.strip().split('##SENT##') for paper in raw_papers]\n\n # Setting Env. 
Var.\n\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r') as stream:\n print('Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(\n MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print('Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n\n # Write raw text, cluster file\n\n # This stuff should be generated by text2cluster.pl\n # cluster_lines = []\n # cluster_lines.append(\"<?xml version = '1.0' encoding='utf-8'?>\\n\")\n # cluster_lines.append(\"<CLUSTER LANG='ENG'>\\n\")\n\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), total=len(papers)):\n\n # did = f'raw_text_{i+1}.txt'\n did = f'{i+1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n # make sure the sent split are the same as our annotation\n stream.write('\\n'.join(paper))\n\n # delete </ pattern or XML might break\n # os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/<\\///g\"')\n # https://stackoverflow.com/questions/8914435/awk-sed-how-to-remove-parentheses-in-simple-text-file\n # os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/[><]//g\"')\n\n # https://validator.w3.org/feed/docs/error/SAXError.html\n # https://www.w3.org/TR/REC-xml/#dt-chardata\n print('Clean up stuff that might influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</&lt;/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&amp;/g\"')\n os.system(f'find 
{DATA_DIR} -type f | xargs sed -i \"s/>/&gt;/g\"')\n\n # cluster_lines.append(f\"\\t<D DID='{did}' />\\n\")\n # cluster_lines.append('</CLUSTER>\\n')\n\n # Get docsent\n\n # with open(cluster_file, 'w') as stream:\n # stream.writelines(cluster_lines)\n\n # Path(cluster_file).touch()\n\n print('Create cluster and docsent files...')\n os.system(\n f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n\n if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl')\n print(\"Currently, it has bug and can't create file\")\n\n # Run config\n\n # with open(config_file, 'w') as stream:\n # stream.write(CONFIG)\n\n # extract_file = os.path.join(DATA_DIR, f'{TARGET}.extract')\n # os.system(\n # f'cat {config_file} | {MEAD_BIN}/driver.pl > {extract_file}')\n\n # https://askubuntu.com/questions/20414/find-and-replace-text-within-a-file-using-commands\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"')\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"')\n\n\nOUTPUT_PATH = '../output'\nOUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? 
(Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\n\nsummary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')\nextract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')\n# compression basis is \"sentence\", and give PERCENT% summary\nshared_parameters = f'-sentences -percent {PERCENT}'\n\n# os.system(\n# f'perl {MEAD_BIN}/mead.pl {shared_parameters} -summary -output {summary_file} {TARGET}')\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
with open("out.txt", "w", encoding = "utf_8") as file: file.write("明日の天気です∖n") file.write("関西地方はおおむね晴れ.") file.write("紅葉を見るには絶好の日和でしょう∖n") file.write(“映像は嵐山の様子です.") file.write("今年も大変な数の観光客が訪れているようですね.∖n")
normal
{ "blob_id": "4fea9941defd6703be3cae034d979933262074e3", "index": 3728, "step-1": "with open(\"out.txt\", \"w\", encoding = \"utf_8\") as file:\n file.write(\"明日の天気です∖n\")\n file.write(\"関西地方はおおむね晴れ.\")\n file.write(\"紅葉を見るには絶好の日和でしょう∖n\")\n file.write(“映像は嵐山の様子です.\")\n file.write(\"今年も大変な数の観光客が訪れているようですね.∖n\")\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python3 import json import sys import time import zmq log_file = "./mavlink-log.txt" zmq_context = zmq.Context() connect_to = sys.argv[1] send_socket = zmq_context.socket(zmq.PUSH) send_socket.connect(connect_to) def get_first_timestamp(log_file): with open(log_file) as f: for line in f: line_json = json.loads(line) return line_json["timestamp"] start_time_file = get_first_timestamp(log_file) start_time_importer = time.time() with open(log_file) as f: for line in f: line_json = json.loads(line) importer_age = time.time() - start_time_importer line_age = line_json["timestamp"] - start_time_file sleep_time = line_age - importer_age if sleep_time > 0: #print(str(line_age)+" - "+str(importer_age)) #print(sleep_time) time.sleep(sleep_time) print(line_json) send_socket.send_json(line_json)
normal
{ "blob_id": "49679782ac696b3dc4f5038565f88304a44098e1", "index": 6188, "step-1": "<mask token>\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\n<mask token>\n", "step-2": "<mask token>\nsend_socket.connect(connect_to)\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\n<mask token>\nwith open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n importer_age = time.time() - start_time_importer\n line_age = line_json['timestamp'] - start_time_file\n sleep_time = line_age - importer_age\n if sleep_time > 0:\n time.sleep(sleep_time)\n print(line_json)\n send_socket.send_json(line_json)\n", "step-3": "<mask token>\nlog_file = './mavlink-log.txt'\nzmq_context = zmq.Context()\nconnect_to = sys.argv[1]\nsend_socket = zmq_context.socket(zmq.PUSH)\nsend_socket.connect(connect_to)\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\nstart_time_file = get_first_timestamp(log_file)\nstart_time_importer = time.time()\nwith open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n importer_age = time.time() - start_time_importer\n line_age = line_json['timestamp'] - start_time_file\n sleep_time = line_age - importer_age\n if sleep_time > 0:\n time.sleep(sleep_time)\n print(line_json)\n send_socket.send_json(line_json)\n", "step-4": "import json\nimport sys\nimport time\nimport zmq\nlog_file = './mavlink-log.txt'\nzmq_context = zmq.Context()\nconnect_to = sys.argv[1]\nsend_socket = zmq_context.socket(zmq.PUSH)\nsend_socket.connect(connect_to)\n\n\ndef get_first_timestamp(log_file):\n with open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n return line_json['timestamp']\n\n\nstart_time_file = 
get_first_timestamp(log_file)\nstart_time_importer = time.time()\nwith open(log_file) as f:\n for line in f:\n line_json = json.loads(line)\n importer_age = time.time() - start_time_importer\n line_age = line_json['timestamp'] - start_time_file\n sleep_time = line_age - importer_age\n if sleep_time > 0:\n time.sleep(sleep_time)\n print(line_json)\n send_socket.send_json(line_json)\n", "step-5": "#!/usr/bin/env python3\n\nimport json\nimport sys\nimport time\nimport zmq\n\nlog_file = \"./mavlink-log.txt\"\n\nzmq_context = zmq.Context()\n\nconnect_to = sys.argv[1]\nsend_socket = zmq_context.socket(zmq.PUSH)\nsend_socket.connect(connect_to)\n\ndef get_first_timestamp(log_file):\n\twith open(log_file) as f:\n\t\tfor line in f:\n\t\t\tline_json = json.loads(line)\n\t\t\treturn line_json[\"timestamp\"]\n\n\n\nstart_time_file = get_first_timestamp(log_file)\nstart_time_importer = time.time()\n\nwith open(log_file) as f:\n\tfor line in f:\n\t\tline_json = json.loads(line)\n\n\t\timporter_age = time.time() - start_time_importer\n\t\tline_age = line_json[\"timestamp\"] - start_time_file\n\n\t\tsleep_time = line_age - importer_age\n\n\t\tif sleep_time > 0:\n\t\t\t#print(str(line_age)+\" - \"+str(importer_age))\n\t\t\t#print(sleep_time)\n\t\t\ttime.sleep(sleep_time)\n\n\n\t\tprint(line_json)\n\t\tsend_socket.send_json(line_json)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Duy B. Lam # 61502602 # Project 3 # A module that reads the input and constructs the objects # that will generate the program's output. This is the only # module that should have an if __name__ == '__main__' block # to make it executable; you would execute this module to run your program. import Module1 #USED TO RETREIVE THE NUMBER OF LOCATIONS def tripQuantity() -> int: try: locationQ = int(input()) return locationQ finally: print('locationQ: ' + str(locationQ)) #USED TO RETREIVE THE NUMBER OF REQUESTED OUTPUTS def outputQ() -> int: try: outputQ = int(input()) return outputQ finally: print('output quantity:' + str(outputQ)) #USED TO RECORD SEARCH LOCATIONS def quantityToLocations(tripQ: int) -> list: locationCount = 0 locationList = list() while (locationCount < tripQ): locationList.append(input()) locationCount+=1 return locationList #USED TO RECORD OUTPUT OPTIONS def quantityToOutput(outputQ: int) -> list: outputCount = 0 outputList = list() while (outputCount < outputQ): outputList.append(input()) outputCount += 1 return outputList if __name__ == '__main__': #USED TO GET USER INPUTS locationQ = tripQuantity() locationList = quantityToLocations(locationQ) #print to double check #CREATES A NEW SEARCH INSTANCE AND IT'S REQUEST URL newSearch = Module1.URL() newSearch.set_from_location(locationList[0]) newSearch.set_to_location(locationList[1:len(locationList)]) print(str(newSearch.get_To_Location())) newSearch.set_request_url() newSearch_request_url = newSearch.get_Request_URL() #print to double check #THIS FUNCTION MAKES THE REQUEST AND GATHERS RESPONSE INTO DICTIONARY newSearch_reponse = newSearch.search_request_response() #USED TO GET USER OUTPUTS #outputQ = outputQ() #outputList = quantityToOutput(outputQ) #print(outputList) ''' #USED TO REQUEST MAPQUEST SEARCH x = urllib.request.urlopen(url) #USED TO DECODE MAPQUEST RESPONSE y = x.read().decode(encoding = 'utf-8') print(y) # USE decoded response string to check with pretty json #USED TO CONVERT DECODED 
STRING TO DICT/LISTS z = json.loads(y) #dictionary of mapquest response which also includes lists print(type(z['route']['locations'])) locationsList = z['route']['locations'] print(locationsList) print(locationsList[1]['latLng']) i = 0 if i < len(locationsList): for key in locationsList[i]: if key == 'latLng': print(locationsList[i][key]) i+=1 #### i = 0 #### if i < len(locationsList): #### if locationList[i] == 'latLng': #### print(locationsList[i]) #### #print (y) '''
normal
{ "blob_id": "da19bc4fc999bd48a3d55b8cb5f47ba6208bc02b", "index": 4502, "step-1": "<mask token>\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\n<mask token>\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef tripQuantity() ->int:\n try:\n locationQ = int(input())\n return locationQ\n finally:\n print('locationQ: ' + str(locationQ))\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\ndef quantityToLocations(tripQ: int) ->list:\n locationCount = 0\n locationList = list()\n while locationCount < tripQ:\n locationList.append(input())\n locationCount += 1\n return locationList\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef tripQuantity() ->int:\n try:\n locationQ = int(input())\n return locationQ\n finally:\n print('locationQ: ' + str(locationQ))\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\ndef quantityToLocations(tripQ: int) ->list:\n locationCount = 0\n locationList = list()\n while locationCount < tripQ:\n locationList.append(input())\n locationCount += 1\n return locationList\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\nif __name__ == '__main__':\n locationQ = tripQuantity()\n locationList = quantityToLocations(locationQ)\n newSearch = Module1.URL()\n 
newSearch.set_from_location(locationList[0])\n newSearch.set_to_location(locationList[1:len(locationList)])\n print(str(newSearch.get_To_Location()))\n newSearch.set_request_url()\n newSearch_request_url = newSearch.get_Request_URL()\n newSearch_reponse = newSearch.search_request_response()\n \"\"\"\n #USED TO REQUEST MAPQUEST SEARCH\n x = urllib.request.urlopen(url)\n \n #USED TO DECODE MAPQUEST RESPONSE\n y = x.read().decode(encoding = 'utf-8')\n print(y) # USE decoded response string to check with pretty json\n \n #USED TO CONVERT DECODED STRING TO DICT/LISTS\n z = json.loads(y) #dictionary of mapquest response which also includes lists\n\n print(type(z['route']['locations']))\n \n locationsList = z['route']['locations']\n print(locationsList)\n print(locationsList[1]['latLng'])\n i = 0\n if i < len(locationsList):\n for key in locationsList[i]:\n if key == 'latLng':\n print(locationsList[i][key])\n i+=1\n#### i = 0\n#### if i < len(locationsList):\n#### if locationList[i] == 'latLng':\n#### print(locationsList[i])\n#### \n \n #print (y)\n\"\"\"\n", "step-4": "import Module1\n\n\ndef tripQuantity() ->int:\n try:\n locationQ = int(input())\n return locationQ\n finally:\n print('locationQ: ' + str(locationQ))\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\ndef quantityToLocations(tripQ: int) ->list:\n locationCount = 0\n locationList = list()\n while locationCount < tripQ:\n locationList.append(input())\n locationCount += 1\n return locationList\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\nif __name__ == '__main__':\n locationQ = tripQuantity()\n locationList = quantityToLocations(locationQ)\n newSearch = Module1.URL()\n newSearch.set_from_location(locationList[0])\n newSearch.set_to_location(locationList[1:len(locationList)])\n 
print(str(newSearch.get_To_Location()))\n newSearch.set_request_url()\n newSearch_request_url = newSearch.get_Request_URL()\n newSearch_reponse = newSearch.search_request_response()\n \"\"\"\n #USED TO REQUEST MAPQUEST SEARCH\n x = urllib.request.urlopen(url)\n \n #USED TO DECODE MAPQUEST RESPONSE\n y = x.read().decode(encoding = 'utf-8')\n print(y) # USE decoded response string to check with pretty json\n \n #USED TO CONVERT DECODED STRING TO DICT/LISTS\n z = json.loads(y) #dictionary of mapquest response which also includes lists\n\n print(type(z['route']['locations']))\n \n locationsList = z['route']['locations']\n print(locationsList)\n print(locationsList[1]['latLng'])\n i = 0\n if i < len(locationsList):\n for key in locationsList[i]:\n if key == 'latLng':\n print(locationsList[i][key])\n i+=1\n#### i = 0\n#### if i < len(locationsList):\n#### if locationList[i] == 'latLng':\n#### print(locationsList[i])\n#### \n \n #print (y)\n\"\"\"\n", "step-5": "# Duy B. Lam\r\n# 61502602\r\n# Project 3\r\n\r\n# A module that reads the input and constructs the objects\r\n# that will generate the program's output. 
This is the only\r\n# module that should have an if __name__ == '__main__' block\r\n# to make it executable; you would execute this module to run your program.\r\n\r\n\r\nimport Module1\r\n\r\n\r\n#USED TO RETREIVE THE NUMBER OF LOCATIONS\r\ndef tripQuantity() -> int:\r\n try:\r\n locationQ = int(input())\r\n return locationQ\r\n finally:\r\n print('locationQ: ' + str(locationQ))\r\n\r\n#USED TO RETREIVE THE NUMBER OF REQUESTED OUTPUTS \r\ndef outputQ() -> int:\r\n try:\r\n outputQ = int(input())\r\n return outputQ\r\n finally:\r\n print('output quantity:' + str(outputQ))\r\n\r\n#USED TO RECORD SEARCH LOCATIONS\r\ndef quantityToLocations(tripQ: int) -> list:\r\n locationCount = 0\r\n locationList = list()\r\n while (locationCount < tripQ):\r\n locationList.append(input())\r\n locationCount+=1\r\n return locationList \r\n\r\n#USED TO RECORD OUTPUT OPTIONS\r\ndef quantityToOutput(outputQ: int) -> list:\r\n outputCount = 0\r\n outputList = list()\r\n while (outputCount < outputQ):\r\n outputList.append(input())\r\n outputCount += 1\r\n return outputList\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n #USED TO GET USER INPUTS\r\n locationQ = tripQuantity()\r\n locationList = quantityToLocations(locationQ) #print to double check\r\n \r\n #CREATES A NEW SEARCH INSTANCE AND IT'S REQUEST URL\r\n newSearch = Module1.URL()\r\n newSearch.set_from_location(locationList[0])\r\n newSearch.set_to_location(locationList[1:len(locationList)])\r\n print(str(newSearch.get_To_Location()))\r\n newSearch.set_request_url()\r\n newSearch_request_url = newSearch.get_Request_URL() #print to double check\r\n\r\n #THIS FUNCTION MAKES THE REQUEST AND GATHERS RESPONSE INTO DICTIONARY\r\n newSearch_reponse = newSearch.search_request_response()\r\n \r\n #USED TO GET USER OUTPUTS\r\n #outputQ = outputQ()\r\n #outputList = quantityToOutput(outputQ)\r\n #print(outputList)\r\n\r\n \r\n \r\n \r\n '''\r\n #USED TO REQUEST MAPQUEST SEARCH\r\n x = urllib.request.urlopen(url)\r\n \r\n #USED TO DECODE 
MAPQUEST RESPONSE\r\n y = x.read().decode(encoding = 'utf-8')\r\n print(y) # USE decoded response string to check with pretty json\r\n \r\n #USED TO CONVERT DECODED STRING TO DICT/LISTS\r\n z = json.loads(y) #dictionary of mapquest response which also includes lists\r\n\r\n print(type(z['route']['locations']))\r\n \r\n locationsList = z['route']['locations']\r\n print(locationsList)\r\n print(locationsList[1]['latLng'])\r\n i = 0\r\n if i < len(locationsList):\r\n for key in locationsList[i]:\r\n if key == 'latLng':\r\n print(locationsList[i][key])\r\n i+=1\r\n#### i = 0\r\n#### if i < len(locationsList):\r\n#### if locationList[i] == 'latLng':\r\n#### print(locationsList[i])\r\n#### \r\n \r\n #print (y)\r\n'''\r\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
from discord.ext import commands import discord import os import random bot = commands.Bot(command_prefix="!") @bot.event async def on_ready(): print(f"Logged in as {bot.user.name}") @bot.command() async def ping(ctx): await ctx.send("pong") # Lucky command, it picks a number between 0-50 and spams your dm's with that number @bot.command() async def lucky(ctx): spamCount = random.randint(0, 50) for num in range(int(spamCount)): await ctx.message.author.send("ARE YOU FELLING LUCKY???") # Basic spam command, you can provide a message and specify how many messages @bot.command() async def spam(ctx, spamCtx="spam", spamCount=1): for num in range(int(spamCount)): await ctx.send(str(spamCtx)) # Lets you mention a specific user who would like to spam in their DM's, you can specify a message @bot.command() async def attack(ctx, user: discord.User, *, message="GET SPAMMED NERD"): spamCount = 10 for num in range(int(spamCount)): await user.send(message) if __name__ == "__main__": bot.run(os.environ['TOKEN'])
normal
{ "blob_id": "b48bc9475a8dc593ba858af8ed4e930ae290fd69", "index": 6479, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n", "step-3": "<mask token>\nbot = commands.Bot(command_prefix='!')\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n", "step-4": "from discord.ext import commands\nimport discord\nimport os\nimport random\nbot = commands.Bot(command_prefix='!')\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 
50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n", "step-5": "from discord.ext import commands\nimport discord\nimport os\nimport random\n\nbot = commands.Bot(command_prefix=\"!\")\n\[email protected]\nasync def on_ready():\n print(f\"Logged in as {bot.user.name}\")\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send(\"pong\")\n\n\n# Lucky command, it picks a number between 0-50 and spams your dm's with that number\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send(\"ARE YOU FELLING LUCKY???\")\n\n# Basic spam command, you can provide a message and specify how many messages\[email protected]()\nasync def spam(ctx, spamCtx=\"spam\", spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n# Lets you mention a specific user who would like to spam in their DM's, you can specify a message\[email protected]()\nasync def attack(ctx, user: discord.User, *, message=\"GET SPAMMED NERD\"):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\nif __name__ == \"__main__\":\n bot.run(os.environ['TOKEN'])", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Question : determine whether given number is power of 2 # logic : every no. of the form 2^i has bit represetntaion of the form : # 2 -> 10 1->01 # 4 -> 100 3->011 # 8 -> 1000 7->0111 # 16 -> 10000 15->01111 # 32 -> 100000 31->011111 # ... and so on # Thus there is a pattern here, ever predecessor of power of 2 has all 0 bits flipped and so as 1 bit itself # Complexity : using bit manipulation it can be done in O(1) time def is_power(n): if n==0: return 'not power of two' if n & (n-1) == 0 : return 'power of 2' return 'not power of 2' if __name__ == "__main__": input_number = int(input('enter the number : ')) print(is_power(input_number))
normal
{ "blob_id": "676aec735dd7441b0c481956ad18b012b8d98ea4", "index": 8459, "step-1": "<mask token>\n", "step-2": "def is_power(n):\n if n == 0:\n return 'not power of two'\n if n & n - 1 == 0:\n return 'power of 2'\n return 'not power of 2'\n\n\n<mask token>\n", "step-3": "def is_power(n):\n if n == 0:\n return 'not power of two'\n if n & n - 1 == 0:\n return 'power of 2'\n return 'not power of 2'\n\n\nif __name__ == '__main__':\n input_number = int(input('enter the number : '))\n print(is_power(input_number))\n", "step-4": "# Question : determine whether given number is power of 2\n\n# logic : every no. of the form 2^i has bit represetntaion of the form : \n# 2 -> 10 1->01\n# 4 -> 100 3->011\n# 8 -> 1000 7->0111\n# 16 -> 10000 15->01111\n# 32 -> 100000 31->011111\n# ... and so on\n\n# Thus there is a pattern here, ever predecessor of power of 2 has all 0 bits flipped and so as 1 bit itself\n\n# Complexity : using bit manipulation it can be done in O(1) time\n\n\ndef is_power(n):\n if n==0:\n return 'not power of two'\n if n & (n-1) == 0 :\n return 'power of 2'\n return 'not power of 2'\n\n\nif __name__ == \"__main__\":\n input_number = int(input('enter the number : '))\n print(is_power(input_number))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import numpy as np from feature.features import Features class RealWorldFeatures(Features): def __init__(self): super().__init__('tsagkias/real_world_features') def _extract_features(self, df): # weather from http://www.dwd.de/DE/leistungen/klimadatendeutschland/klimadatendeutschland.html features = [ df['temp_ham'], df['temp_fra'], df['temp_ber'], df['hum_ham'], df['hum_fra'], df['hum_ber'], ] return np.vstack(features).T
normal
{ "blob_id": "f6b2e66379b483c6a573d34d73ae0d10de7315a3", "index": 6815, "step-1": "<mask token>\n\n\nclass RealWorldFeatures(Features):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass RealWorldFeatures(Features):\n\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n <mask token>\n", "step-3": "<mask token>\n\n\nclass RealWorldFeatures(Features):\n\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n\n def _extract_features(self, df):\n features = [df['temp_ham'], df['temp_fra'], df['temp_ber'], df[\n 'hum_ham'], df['hum_fra'], df['hum_ber']]\n return np.vstack(features).T\n", "step-4": "import numpy as np\nfrom feature.features import Features\n\n\nclass RealWorldFeatures(Features):\n\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n\n def _extract_features(self, df):\n features = [df['temp_ham'], df['temp_fra'], df['temp_ber'], df[\n 'hum_ham'], df['hum_fra'], df['hum_ber']]\n return np.vstack(features).T\n", "step-5": "import numpy as np\nfrom feature.features import Features\n\nclass RealWorldFeatures(Features):\n def __init__(self):\n super().__init__('tsagkias/real_world_features')\n\n def _extract_features(self, df):\n # weather from http://www.dwd.de/DE/leistungen/klimadatendeutschland/klimadatendeutschland.html\n\n features = [\n df['temp_ham'],\n df['temp_fra'],\n df['temp_ber'],\n df['hum_ham'],\n df['hum_fra'],\n df['hum_ber'],\n ]\n\n return np.vstack(features).T\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# create item based on name using post method, get specific item or list of items using get method, update item using put and delete item using del method. import os from flask import Flask from flask_restful import Api from flask_jwt import JWT, timedelta from security import authenticate, identity from resources.user import UserRegister from resources.item import Item,ItemList from resources.store import Store, StoreList app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off flask SQLAlchemy modification. app.secret_key = 'key123' api = Api(app) jwt = JWT(app, authenticate, identity) api.add_resource(Store,'/store/<string:name>') api.add_resource(Item,'/item/<string:name>') # http://localhost:5000/student/Rolf api.add_resource(ItemList,'/items') api.add_resource(StoreList,'/stores') api.add_resource(UserRegister, '/register') if __name__ == '__main__': from db import db db.init_app(app) app.run(debug=True)
normal
{ "blob_id": "7525691ece4fe66bb175e470db3ac78f701e3730", "index": 199, "step-1": "<mask token>\n", "step-2": "<mask token>\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'key123'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-4": "import os\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt import JWT, timedelta\nfrom security import authenticate, identity\nfrom resources.user import UserRegister\nfrom resources.item import Item, ItemList\nfrom resources.store import Store, StoreList\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'key123'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-5": "# create item based on name using post method, get specific item or list of items using get method, 
update item using put and delete item using del method.\nimport os\n\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt import JWT, timedelta\n\nfrom security import authenticate, identity\nfrom resources.user import UserRegister\nfrom resources.item import Item,ItemList\nfrom resources.store import Store, StoreList\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off flask SQLAlchemy modification.\napp.secret_key = 'key123'\napi = Api(app)\n\njwt = JWT(app, authenticate, identity)\n\napi.add_resource(Store,'/store/<string:name>')\napi.add_resource(Item,'/item/<string:name>') # http://localhost:5000/student/Rolf\napi.add_resource(ItemList,'/items')\napi.add_resource(StoreList,'/stores')\napi.add_resource(UserRegister, '/register')\n\n\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""A tiny example binary for the native Python rules of Bazel.""" import unittest from bazel_tutorial.examples.py.lib import GetNumber from bazel_tutorial.examples.py.fibonacci.fib import Fib class TestGetNumber(unittest.TestCase): def test_ok(self): self.assertEqual(GetNumber(), 42) def test_fib(self): self.assertEqual(Fib(5), 8) if __name__ == '__main__': unittest.main()
normal
{ "blob_id": "d126efa91b964a3a374d546bb860b39ae26dfa22", "index": 256, "step-1": "<mask token>\n\n\nclass TestGetNumber(unittest.TestCase):\n <mask token>\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "<mask token>\nimport unittest\nfrom bazel_tutorial.examples.py.lib import GetNumber\nfrom bazel_tutorial.examples.py.fibonacci.fib import Fib\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "\"\"\"A tiny example binary for the native Python rules of Bazel.\"\"\"\n\nimport unittest\nfrom bazel_tutorial.examples.py.lib import GetNumber\nfrom bazel_tutorial.examples.py.fibonacci.fib import Fib\n\n\nclass TestGetNumber(unittest.TestCase):\n\n def test_ok(self):\n self.assertEqual(GetNumber(), 42)\n\n def test_fib(self):\n self.assertEqual(Fib(5), 8)\n\nif __name__ == '__main__':\n unittest.main()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# -*- coding: utf-8 -*- """ Created on Sun Dec 20 14:48:56 2020 @author: dhk1349 """ n = int(input()) #목표채널 m = int(input()) broken=[int(i) for i in input().split()] #망가진 버튼 normal=[i for i in range(10)] #사용가능한 버튼 ans=abs(n-100) #시작 시 정답 for i in broken: normal.remove(i) tempnum=0 iternum=1 def solve(lst, target): #가장 유사한 숫자를 뱉 while n!=0: val=n%10 n=n/10 if val not in normal: tempnum+=(iternum*val) iternum*=10
normal
{ "blob_id": "2a6ae615b427a7c970aacf9804865ea7952d065f", "index": 5888, "step-1": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 20 14:48:56 2020\n\n@author: dhk1349\n\"\"\"\n\nn = int(input()) #목표채널 \nm = int(input())\nbroken=[int(i) for i in input().split()] #망가진 버튼 \nnormal=[i for i in range(10)] #사용가능한 버튼 \nans=abs(n-100) #시작 시 정답 \n\n\nfor i in broken:\n normal.remove(i)\n\n\ntempnum=0\niternum=1\n\ndef solve(lst, target):\n #가장 유사한 숫자를 뱉\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\nwhile n!=0:\n val=n%10\n n=n/10\n \n if val not in normal:\n \n \n tempnum+=(iternum*val)\n iternum*=10\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
a=list(input("enter the string or sentence to perform caesar cipher : ")) b=int(input('enter the frequency to perform ceasar cipher ')) e=[] #print(a) #print (a[4]) c=len(a) #print(c) for i in range (0,c): d=ord(a[i]) #print(d) if b> 0: for j in range (1,b+1): if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z': if d>= 65 and d< 90 or d>=97 and d<122: d+=1 elif d==90: d=65 elif d==122: d=97 else : pass f=chr(d) e.append(f) if b<0: g=abs(b) for j in range (1,g+1): if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z': if d> 65 and d<= 90 or d>97 and d<=122: d-=1 elif d==97: d=122 elif d==65: d=90 else : pass f=chr(d) e.append(f) #print (e) for k in range (0,c): print(e[k],end='') '''65-90 A-Z 97-122 a-z'''
normal
{ "blob_id": "287d4c2d490c9dcdd7be7e86fe577139a3d30f54", "index": 6676, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(0, c):\n d = ord(a[i])\n if b > 0:\n for j in range(1, b + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d >= 65 and d < 90 or d >= 97 and d < 122:\n d += 1\n elif d == 90:\n d = 65\n elif d == 122:\n d = 97\n else:\n pass\n f = chr(d)\n e.append(f)\n if b < 0:\n g = abs(b)\n for j in range(1, g + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d > 65 and d <= 90 or d > 97 and d <= 122:\n d -= 1\n elif d == 97:\n d = 122\n elif d == 65:\n d = 90\n else:\n pass\n f = chr(d)\n e.append(f)\nfor k in range(0, c):\n print(e[k], end='')\n<mask token>\n", "step-3": "a = list(input('enter the string or sentence to perform caesar cipher : '))\nb = int(input('enter the frequency to perform ceasar cipher '))\ne = []\nc = len(a)\nfor i in range(0, c):\n d = ord(a[i])\n if b > 0:\n for j in range(1, b + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d >= 65 and d < 90 or d >= 97 and d < 122:\n d += 1\n elif d == 90:\n d = 65\n elif d == 122:\n d = 97\n else:\n pass\n f = chr(d)\n e.append(f)\n if b < 0:\n g = abs(b)\n for j in range(1, g + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d > 65 and d <= 90 or d > 97 and d <= 122:\n d -= 1\n elif d == 97:\n d = 122\n elif d == 65:\n d = 90\n else:\n pass\n f = chr(d)\n e.append(f)\nfor k in range(0, c):\n print(e[k], end='')\n<mask token>\n", "step-4": "a=list(input(\"enter the string or sentence to perform caesar cipher : \"))\r\nb=int(input('enter the frequency to perform ceasar cipher '))\r\ne=[]\r\n#print(a)\r\n#print (a[4])\r\nc=len(a)\r\n#print(c)\r\nfor i in range (0,c):\r\n d=ord(a[i])\r\n #print(d)\r\n if b> 0:\r\n for j in range (1,b+1):\r\n if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z':\r\n if d>= 65 and d< 90 or d>=97 and d<122:\r\n d+=1\r\n elif d==90:\r\n 
d=65\r\n elif d==122:\r\n d=97\r\n else :\r\n pass\r\n f=chr(d)\r\n e.append(f)\r\n if b<0:\r\n g=abs(b)\r\n for j in range (1,g+1):\r\n if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z':\r\n if d> 65 and d<= 90 or d>97 and d<=122:\r\n d-=1\r\n elif d==97:\r\n d=122\r\n elif d==65:\r\n d=90\r\n else :\r\n pass\r\n f=chr(d)\r\n e.append(f)\r\n#print (e)\r\nfor k in range (0,c):\r\n print(e[k],end='')\r\n'''65-90 A-Z\r\n 97-122 a-z'''\r\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import time import machine from machine import Timer import network import onewire, ds18x20 import ujson import ubinascii from umqtt.simple import MQTTClient import ntptime import errno #Thrown if an error that is fatal occurs, #stop measurement cycle. class Error(Exception): pass #Thrown if an error that is not fatal occurs, #goes to deep sleep and continues as normal. #For example no wifi connection at this time. class Warning(Exception): pass def gettimestr(): rtc=machine.RTC() curtime=rtc.datetime() _time="%04d" % curtime[0]+ "%02d" % curtime[1]+ "%02d" % curtime[2]+" "+ "%02d" % curtime[4]+ "%02d" % curtime[5] return _time def deepsleep(): # configure RTC.ALARM0 to be able to wake the device rtc = machine.RTC() rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP) # set RTC.ALARM0 to fire after 60 seconds (waking the device) rtc.alarm(rtc.ALARM0, 60000) # put the device to sleep machine.deepsleep() timer_index=20 def timercallback(tim): global timer_index if timer_index==0: print("Timer reached 0, something went wrong -> sleep.") deepsleep() print("Timer index "+str(timer_index)) timer_index=timer_index-1 #check if gpio4 is pulled down stoppin = machine.Pin(4,mode=machine.Pin.IN,pull=machine.Pin.PULL_UP) if stoppin.value()==0: print("Pin down, stop") else: try: #normal loop tim = Timer(-1) tim.init(period=1000, mode=Timer.PERIODIC, callback=timercallback) try: f = open('config.json', 'r') config = ujson.loads(f.readall()) except OSError as e: if e.args[0] == errno.MP_ENOENT or e.args[0] == errno.MP_EIO: print("I/O error({0}): {1}".format(e.args[0], e.args[1])) raise Error # the device is on GPIOxx ONEWIREPIN = config['ONEWIREPIN'] dat = machine.Pin(ONEWIREPIN) # create the onewire object ds = ds18x20.DS18X20(onewire.OneWire(dat)) # scan for devices on the bus roms = ds.scan() print('found devices:', roms) if (len(roms)>0): ds.convert_temp() time.sleep_ms(750) # Check if we have wifi, and wait for connection if not. 
print("Check wifi connection.") wifi = network.WLAN(network.STA_IF) i = 0 while not wifi.isconnected(): if (i>10): print("No wifi connection.") raise Warning print(".") time.sleep(1) i=i+1 try: print("Get time.") ntptime.settime() except OSError as e: if e.args[0] == errno.ETIMEDOUT: #OSError: [Errno 110] ETIMEDOUT print("Timeout error, didn't get ntptime.") #if we did not wake up from deep sleep #we cannot continue until we get correct time if (machine.reset_cause()!=machine.DEEPSLEEP): raise Warning if e.args[0] == -2: #OSError: dns error print("DNS error, didn't get ntptime.") #if we did not wake up from deep sleep #we cannot continue until we get correct time if (machine.reset_cause()!=machine.DEEPSLEEP): raise Warning else: raise _time=gettimestr() print("Open MQTT connection.") c = MQTTClient("umqtt_client", config['MQTT_BROKER']) c.connect() #check battery voltage? if (config['MEASURE_VOLTAGE']): adc = machine.ADC(0) voltage = adc.read(); topic="raw/esp8266/"+ubinascii.hexlify(machine.unique_id()).decode()+"/voltage" message=_time+" "+str(voltage) c.publish(topic,message) #loop ds18b20 and send results to mqtt broker for rom in roms: print("topic "+config['MQTT_TOPIC']+ubinascii.hexlify(rom).decode()) topic=config['MQTT_TOPIC']+ubinascii.hexlify(rom).decode()+"/temperature" print(_time) print(ds.read_temp(rom)) message=_time+' '+str(ds.read_temp(rom)) c.publish(topic,message) c.disconnect() deepsleep() except Warning: deepsleep() except Error: print("Error({0}): {1}".format(e.args[0], e.args[1]))
normal
{ "blob_id": "b934770e9e57a0ead124e245f394433ce853dec9", "index": 8691, "step-1": "<mask token>\n\n\nclass Error(Exception):\n pass\n\n\nclass Warning(Exception):\n pass\n\n\ndef gettimestr():\n rtc = machine.RTC()\n curtime = rtc.datetime()\n _time = '%04d' % curtime[0] + '%02d' % curtime[1] + '%02d' % curtime[2\n ] + ' ' + '%02d' % curtime[4] + '%02d' % curtime[5]\n return _time\n\n\ndef deepsleep():\n rtc = machine.RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)\n rtc.alarm(rtc.ALARM0, 60000)\n machine.deepsleep()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Error(Exception):\n pass\n\n\nclass Warning(Exception):\n pass\n\n\ndef gettimestr():\n rtc = machine.RTC()\n curtime = rtc.datetime()\n _time = '%04d' % curtime[0] + '%02d' % curtime[1] + '%02d' % curtime[2\n ] + ' ' + '%02d' % curtime[4] + '%02d' % curtime[5]\n return _time\n\n\ndef deepsleep():\n rtc = machine.RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)\n rtc.alarm(rtc.ALARM0, 60000)\n machine.deepsleep()\n\n\n<mask token>\n\n\ndef timercallback(tim):\n global timer_index\n if timer_index == 0:\n print('Timer reached 0, something went wrong -> sleep.')\n deepsleep()\n print('Timer index ' + str(timer_index))\n timer_index = timer_index - 1\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Error(Exception):\n pass\n\n\nclass Warning(Exception):\n pass\n\n\ndef gettimestr():\n rtc = machine.RTC()\n curtime = rtc.datetime()\n _time = '%04d' % curtime[0] + '%02d' % curtime[1] + '%02d' % curtime[2\n ] + ' ' + '%02d' % curtime[4] + '%02d' % curtime[5]\n return _time\n\n\ndef deepsleep():\n rtc = machine.RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)\n rtc.alarm(rtc.ALARM0, 60000)\n machine.deepsleep()\n\n\n<mask token>\n\n\ndef timercallback(tim):\n global timer_index\n if timer_index == 0:\n print('Timer reached 0, something went wrong -> sleep.')\n deepsleep()\n print('Timer index ' + str(timer_index))\n timer_index = timer_index - 1\n\n\n<mask 
token>\nif stoppin.value() == 0:\n print('Pin down, stop')\nelse:\n try:\n tim = Timer(-1)\n tim.init(period=1000, mode=Timer.PERIODIC, callback=timercallback)\n try:\n f = open('config.json', 'r')\n config = ujson.loads(f.readall())\n except OSError as e:\n if e.args[0] == errno.MP_ENOENT or e.args[0] == errno.MP_EIO:\n print('I/O error({0}): {1}'.format(e.args[0], e.args[1]))\n raise Error\n ONEWIREPIN = config['ONEWIREPIN']\n dat = machine.Pin(ONEWIREPIN)\n ds = ds18x20.DS18X20(onewire.OneWire(dat))\n roms = ds.scan()\n print('found devices:', roms)\n if len(roms) > 0:\n ds.convert_temp()\n time.sleep_ms(750)\n print('Check wifi connection.')\n wifi = network.WLAN(network.STA_IF)\n i = 0\n while not wifi.isconnected():\n if i > 10:\n print('No wifi connection.')\n raise Warning\n print('.')\n time.sleep(1)\n i = i + 1\n try:\n print('Get time.')\n ntptime.settime()\n except OSError as e:\n if e.args[0] == errno.ETIMEDOUT:\n print(\"Timeout error, didn't get ntptime.\")\n if machine.reset_cause() != machine.DEEPSLEEP:\n raise Warning\n if e.args[0] == -2:\n print(\"DNS error, didn't get ntptime.\")\n if machine.reset_cause() != machine.DEEPSLEEP:\n raise Warning\n else:\n raise\n _time = gettimestr()\n print('Open MQTT connection.')\n c = MQTTClient('umqtt_client', config['MQTT_BROKER'])\n c.connect()\n if config['MEASURE_VOLTAGE']:\n adc = machine.ADC(0)\n voltage = adc.read()\n topic = 'raw/esp8266/' + ubinascii.hexlify(machine.unique_id()\n ).decode() + '/voltage'\n message = _time + ' ' + str(voltage)\n c.publish(topic, message)\n for rom in roms:\n print('topic ' + config['MQTT_TOPIC'] + ubinascii.hexlify(rom).\n decode())\n topic = config['MQTT_TOPIC'] + ubinascii.hexlify(rom).decode(\n ) + '/temperature'\n print(_time)\n print(ds.read_temp(rom))\n message = _time + ' ' + str(ds.read_temp(rom))\n c.publish(topic, message)\n c.disconnect()\n deepsleep()\n except Warning:\n deepsleep()\n except Error:\n print('Error({0}): {1}'.format(e.args[0], 
e.args[1]))\n", "step-4": "import time\nimport machine\nfrom machine import Timer\nimport network\nimport onewire, ds18x20\nimport ujson\nimport ubinascii\nfrom umqtt.simple import MQTTClient\nimport ntptime\nimport errno\n\n\nclass Error(Exception):\n pass\n\n\nclass Warning(Exception):\n pass\n\n\ndef gettimestr():\n rtc = machine.RTC()\n curtime = rtc.datetime()\n _time = '%04d' % curtime[0] + '%02d' % curtime[1] + '%02d' % curtime[2\n ] + ' ' + '%02d' % curtime[4] + '%02d' % curtime[5]\n return _time\n\n\ndef deepsleep():\n rtc = machine.RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)\n rtc.alarm(rtc.ALARM0, 60000)\n machine.deepsleep()\n\n\ntimer_index = 20\n\n\ndef timercallback(tim):\n global timer_index\n if timer_index == 0:\n print('Timer reached 0, something went wrong -> sleep.')\n deepsleep()\n print('Timer index ' + str(timer_index))\n timer_index = timer_index - 1\n\n\nstoppin = machine.Pin(4, mode=machine.Pin.IN, pull=machine.Pin.PULL_UP)\nif stoppin.value() == 0:\n print('Pin down, stop')\nelse:\n try:\n tim = Timer(-1)\n tim.init(period=1000, mode=Timer.PERIODIC, callback=timercallback)\n try:\n f = open('config.json', 'r')\n config = ujson.loads(f.readall())\n except OSError as e:\n if e.args[0] == errno.MP_ENOENT or e.args[0] == errno.MP_EIO:\n print('I/O error({0}): {1}'.format(e.args[0], e.args[1]))\n raise Error\n ONEWIREPIN = config['ONEWIREPIN']\n dat = machine.Pin(ONEWIREPIN)\n ds = ds18x20.DS18X20(onewire.OneWire(dat))\n roms = ds.scan()\n print('found devices:', roms)\n if len(roms) > 0:\n ds.convert_temp()\n time.sleep_ms(750)\n print('Check wifi connection.')\n wifi = network.WLAN(network.STA_IF)\n i = 0\n while not wifi.isconnected():\n if i > 10:\n print('No wifi connection.')\n raise Warning\n print('.')\n time.sleep(1)\n i = i + 1\n try:\n print('Get time.')\n ntptime.settime()\n except OSError as e:\n if e.args[0] == errno.ETIMEDOUT:\n print(\"Timeout error, didn't get ntptime.\")\n if machine.reset_cause() != 
machine.DEEPSLEEP:\n raise Warning\n if e.args[0] == -2:\n print(\"DNS error, didn't get ntptime.\")\n if machine.reset_cause() != machine.DEEPSLEEP:\n raise Warning\n else:\n raise\n _time = gettimestr()\n print('Open MQTT connection.')\n c = MQTTClient('umqtt_client', config['MQTT_BROKER'])\n c.connect()\n if config['MEASURE_VOLTAGE']:\n adc = machine.ADC(0)\n voltage = adc.read()\n topic = 'raw/esp8266/' + ubinascii.hexlify(machine.unique_id()\n ).decode() + '/voltage'\n message = _time + ' ' + str(voltage)\n c.publish(topic, message)\n for rom in roms:\n print('topic ' + config['MQTT_TOPIC'] + ubinascii.hexlify(rom).\n decode())\n topic = config['MQTT_TOPIC'] + ubinascii.hexlify(rom).decode(\n ) + '/temperature'\n print(_time)\n print(ds.read_temp(rom))\n message = _time + ' ' + str(ds.read_temp(rom))\n c.publish(topic, message)\n c.disconnect()\n deepsleep()\n except Warning:\n deepsleep()\n except Error:\n print('Error({0}): {1}'.format(e.args[0], e.args[1]))\n", "step-5": "import time\nimport machine\nfrom machine import Timer\nimport network\nimport onewire, ds18x20\nimport ujson\nimport ubinascii\nfrom umqtt.simple import MQTTClient\nimport ntptime\nimport errno\n\n#Thrown if an error that is fatal occurs,\n#stop measurement cycle.\nclass Error(Exception):\n pass\n\n#Thrown if an error that is not fatal occurs,\n#goes to deep sleep and continues as normal.\n#For example no wifi connection at this time.\nclass Warning(Exception):\n pass\n \ndef gettimestr():\n rtc=machine.RTC()\n curtime=rtc.datetime()\n _time=\"%04d\" % curtime[0]+ \"%02d\" % curtime[1]+ \"%02d\" % curtime[2]+\" \"+ \"%02d\" % curtime[4]+ \"%02d\" % curtime[5]\n return _time\n\ndef deepsleep():\n # configure RTC.ALARM0 to be able to wake the device\n rtc = machine.RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)\n\n # set RTC.ALARM0 to fire after 60 seconds (waking the device)\n rtc.alarm(rtc.ALARM0, 60000)\n\n # put the device to sleep\n 
machine.deepsleep()\n\ntimer_index=20\n\ndef timercallback(tim):\n global timer_index\n if timer_index==0:\n print(\"Timer reached 0, something went wrong -> sleep.\")\n deepsleep()\n print(\"Timer index \"+str(timer_index))\n timer_index=timer_index-1\n \n#check if gpio4 is pulled down\nstoppin = machine.Pin(4,mode=machine.Pin.IN,pull=machine.Pin.PULL_UP)\nif stoppin.value()==0:\n print(\"Pin down, stop\")\nelse:\n try:\n #normal loop\n\n tim = Timer(-1)\n tim.init(period=1000, mode=Timer.PERIODIC, callback=timercallback)\n\n try:\n f = open('config.json', 'r')\n config = ujson.loads(f.readall())\n except OSError as e:\n if e.args[0] == errno.MP_ENOENT or e.args[0] == errno.MP_EIO:\n print(\"I/O error({0}): {1}\".format(e.args[0], e.args[1]))\n raise Error\n\n # the device is on GPIOxx\n ONEWIREPIN = config['ONEWIREPIN']\n dat = machine.Pin(ONEWIREPIN)\n\n # create the onewire object\n ds = ds18x20.DS18X20(onewire.OneWire(dat))\n\n # scan for devices on the bus\n roms = ds.scan()\n print('found devices:', roms)\n if (len(roms)>0):\n ds.convert_temp()\n time.sleep_ms(750)\n\n # Check if we have wifi, and wait for connection if not.\n print(\"Check wifi connection.\")\n wifi = network.WLAN(network.STA_IF)\n i = 0\n while not wifi.isconnected():\n if (i>10):\n print(\"No wifi connection.\")\n raise Warning\n print(\".\")\n time.sleep(1)\n i=i+1\n\n try:\n print(\"Get time.\")\n ntptime.settime()\n except OSError as e:\n if e.args[0] == errno.ETIMEDOUT: #OSError: [Errno 110] ETIMEDOUT\n print(\"Timeout error, didn't get ntptime.\")\n #if we did not wake up from deep sleep\n #we cannot continue until we get correct time\n if (machine.reset_cause()!=machine.DEEPSLEEP):\n raise Warning\n if e.args[0] == -2: #OSError: dns error\n print(\"DNS error, didn't get ntptime.\")\n #if we did not wake up from deep sleep\n #we cannot continue until we get correct time\n if (machine.reset_cause()!=machine.DEEPSLEEP):\n raise Warning\n else:\n raise\n _time=gettimestr()\n \n\n 
print(\"Open MQTT connection.\")\n c = MQTTClient(\"umqtt_client\", config['MQTT_BROKER'])\n c.connect()\n\n #check battery voltage?\n if (config['MEASURE_VOLTAGE']):\n adc = machine.ADC(0)\n voltage = adc.read();\n topic=\"raw/esp8266/\"+ubinascii.hexlify(machine.unique_id()).decode()+\"/voltage\"\n message=_time+\" \"+str(voltage)\n c.publish(topic,message)\n\n #loop ds18b20 and send results to mqtt broker\n for rom in roms:\n print(\"topic \"+config['MQTT_TOPIC']+ubinascii.hexlify(rom).decode())\n topic=config['MQTT_TOPIC']+ubinascii.hexlify(rom).decode()+\"/temperature\"\n print(_time)\n print(ds.read_temp(rom))\n message=_time+' '+str(ds.read_temp(rom))\n c.publish(topic,message)\n\n c.disconnect()\n\n deepsleep()\n except Warning:\n deepsleep()\n except Error:\n print(\"Error({0}): {1}\".format(e.args[0], e.args[1]))\n \n", "step-ids": [ 4, 5, 6, 8, 9 ] }
[ 4, 5, 6, 8, 9 ]
from .storage import Storage class ConnectionManager: def __init__(self): self.store = Storage() def handle(self,msg): if msg['type'] in {'register', 'heartbeat'}: self.store.reg_hb(**msg['payload']) elif msg['type'] == 'result': self.store.result(msg['payload']) return 'send back {}'.format(msg) def add_task(self,msg:dict): return self.store.add_task(msg) def get_task(self, agent_id): return self.store.get_task(agent_id) sendmsg = handle def get_agents(self): return self.store.get_agent() def set_task(self,task_id, state): self.store.tasks[task_id].state = state
normal
{ "blob_id": "03b38e6e2d0097d5d361b0794aba83b8e430323d", "index": 4370, "step-1": "<mask token>\n\n\nclass ConnectionManager:\n <mask token>\n\n def handle(self, msg):\n if msg['type'] in {'register', 'heartbeat'}:\n self.store.reg_hb(**msg['payload'])\n elif msg['type'] == 'result':\n self.store.result(msg['payload'])\n return 'send back {}'.format(msg)\n\n def add_task(self, msg: dict):\n return self.store.add_task(msg)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass ConnectionManager:\n <mask token>\n\n def handle(self, msg):\n if msg['type'] in {'register', 'heartbeat'}:\n self.store.reg_hb(**msg['payload'])\n elif msg['type'] == 'result':\n self.store.result(msg['payload'])\n return 'send back {}'.format(msg)\n\n def add_task(self, msg: dict):\n return self.store.add_task(msg)\n\n def get_task(self, agent_id):\n return self.store.get_task(agent_id)\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ConnectionManager:\n\n def __init__(self):\n self.store = Storage()\n\n def handle(self, msg):\n if msg['type'] in {'register', 'heartbeat'}:\n self.store.reg_hb(**msg['payload'])\n elif msg['type'] == 'result':\n self.store.result(msg['payload'])\n return 'send back {}'.format(msg)\n\n def add_task(self, msg: dict):\n return self.store.add_task(msg)\n\n def get_task(self, agent_id):\n return self.store.get_task(agent_id)\n <mask token>\n\n def get_agents(self):\n return self.store.get_agent()\n\n def set_task(self, task_id, state):\n self.store.tasks[task_id].state = state\n", "step-4": "<mask token>\n\n\nclass ConnectionManager:\n\n def __init__(self):\n self.store = Storage()\n\n def handle(self, msg):\n if msg['type'] in {'register', 'heartbeat'}:\n self.store.reg_hb(**msg['payload'])\n elif msg['type'] == 'result':\n self.store.result(msg['payload'])\n return 'send back {}'.format(msg)\n\n def add_task(self, msg: dict):\n return self.store.add_task(msg)\n\n def get_task(self, 
agent_id):\n return self.store.get_task(agent_id)\n sendmsg = handle\n\n def get_agents(self):\n return self.store.get_agent()\n\n def set_task(self, task_id, state):\n self.store.tasks[task_id].state = state\n", "step-5": "from .storage import Storage\n\n\nclass ConnectionManager:\n def __init__(self):\n self.store = Storage()\n def handle(self,msg):\n if msg['type'] in {'register', 'heartbeat'}:\n self.store.reg_hb(**msg['payload'])\n elif msg['type'] == 'result':\n self.store.result(msg['payload'])\n return 'send back {}'.format(msg)\n\n def add_task(self,msg:dict):\n return self.store.add_task(msg)\n\n def get_task(self, agent_id):\n return self.store.get_task(agent_id)\n sendmsg = handle\n\n def get_agents(self):\n return self.store.get_agent()\n\n def set_task(self,task_id, state):\n self.store.tasks[task_id].state = state", "step-ids": [ 3, 4, 7, 8, 10 ] }
[ 3, 4, 7, 8, 10 ]
from .models import Owner, Vehicle from rest_framework import viewsets, permissions from .serializers import OwnerSerializer, VehicleSerializer class OwnerViewSet(viewsets.ModelViewSet): queryset = Owner.objects.all().order_by('id') serializer_class = OwnerSerializer permission_classes = [permissions.IsAuthenticated] class VehicleViewSet(viewsets.ModelViewSet): queryset = Vehicle.objects.all().order_by('id') serializer_class = VehicleSerializer permission_classes = [permissions.IsAuthenticated]
normal
{ "blob_id": "9290294b5df081ef0cae5450a9ea3baef789c041", "index": 6421, "step-1": "<mask token>\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n", "step-2": "<mask token>\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n", "step-3": "<mask token>\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n queryset = Owner.objects.all().order_by('id')\n serializer_class = OwnerSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n", "step-4": "from .models import Owner, Vehicle\nfrom rest_framework import viewsets, permissions\nfrom .serializers import OwnerSerializer, VehicleSerializer\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n queryset = Owner.objects.all().order_by('id')\n serializer_class = OwnerSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
# "Time Warner Python" Salma Hashem netid: sh5640 #Design costumer service application by asking users series of questions, and based on the customers' answers to the questions, provide them with instructions. #Ask the user to choose from the following options print("Choose from the following options: ") #assign each menu option to a number one= " 1. My internet is not working." two= "2. My cable is not working." three= "3. My phones are not working." four= "4. My bill is wrong." five= "5. I want to upgrade my plan." #Print the options each on its own line and ask the user to input a number and convert into an integer print(one, "\n", two, "\n", three, "\n", four, "\n", five) value= int(input("(Enter a value 1 - 5): ")) #assign variables to user inputs using if else statements for scenario one and print output based on user inputs if value==1: modem_on=input("\nIs your modem on? (Enter Y or N): ") if modem_on=="Y": router_on=input("\nIs your router on? (Enter Y or N): ") if router_on=="Y": redlight= input("\nDoes your router emit a red light? (Enter Y or N): ") if redlight=="Y": print("Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!") else: comp_wifi_on=input("\nAre both your computer and wifi on? (Enter Y or N): ") if comp_wifi_on=="Y": print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.") else: print("If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!") else: print("Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. 
Note, this program will now terminate. Goodbye!") else: print("Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!") #assign variables to user inputs using if statements for scenario two and print output based on user inputs if value==2: cable_on=input("\nIs your cable box on? (Enter Y or N): ") if cable_on=="Y": tv_on=input("\nIs your TV on? (Enter Y or N): ") if tv_on=="Y": print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.") else: print("Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!") else: print("Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!") #assign variables to user inputs using if statements for scenario three and print output based on user inputs if value==3: phones_on=input("\nAre your phones on? (Enter Y or N): ") if phone_on=="Y": landline_plugged=input("\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): ") if landline_plugged=="Y": print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.") else: print("Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!") else: print("Plug your phones into the nearest outlet to turn them on. If you are using wireless phones, please make sure you change them before attempting to use them. 
If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!") #assign variables to user inputs using if statements for scenario four and print output based on user inputs if value==4: late_payment= input("\nWere you late on your last payment? (Enter Y or N): ") if late_payment=="Y": print("If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. Goodbye!") else: print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.") #scenario 5--evaluate input and print output based on user input if value==5: print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.") #create if statements to evaluate invalid user inputs if value<1 or value>5: print("You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.")
normal
{ "blob_id": "736b84bbcf1d5954b491068be4060edeade2c1c5", "index": 2205, "step-1": "<mask token>\n", "step-2": "print('Choose from the following options: ')\n<mask token>\nprint(one, '\\n', two, '\\n', three, '\\n', four, '\\n', five)\n<mask token>\nif value == 1:\n modem_on = input('\\nIs your modem on? (Enter Y or N): ')\n if modem_on == 'Y':\n router_on = input('\\nIs your router on? (Enter Y or N): ')\n if router_on == 'Y':\n redlight = input(\n '\\nDoes your router emit a red light? (Enter Y or N): ')\n if redlight == 'Y':\n print(\n 'Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n comp_wifi_on = input(\n '\\nAre both your computer and wifi on? (Enter Y or N): ')\n if comp_wifi_on == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n \"If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!\"\n )\n else:\n print(\n 'Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 2:\n cable_on = input('\\nIs your cable box on? (Enter Y or N): ')\n if cable_on == 'Y':\n tv_on = input('\\nIs your TV on? (Enter Y or N): ')\n if tv_on == 'Y':\n print(\n 'It looks like you may need additional support. 
Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 3:\n phones_on = input('\\nAre your phones on? (Enter Y or N): ')\n if phone_on == 'Y':\n landline_plugged = input(\n \"\"\"\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): \"\"\"\n )\n if landline_plugged == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your phones into the nearest outlet to turn them on. If you are using wireless phones, please make sure you change them before attempting to use them. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 4:\n late_payment = input(\n '\\nWere you late on your last payment? (Enter Y or N): ')\n if late_payment == 'Y':\n print(\n 'If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. 
Goodbye!'\n )\n else:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value == 5:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value < 1 or value > 5:\n print(\n 'You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n", "step-3": "print('Choose from the following options: ')\none = ' 1. My internet is not working.'\ntwo = '2. My cable is not working.'\nthree = '3. My phones are not working.'\nfour = '4. My bill is wrong.'\nfive = '5. I want to upgrade my plan.'\nprint(one, '\\n', two, '\\n', three, '\\n', four, '\\n', five)\nvalue = int(input('(Enter a value 1 - 5): '))\nif value == 1:\n modem_on = input('\\nIs your modem on? (Enter Y or N): ')\n if modem_on == 'Y':\n router_on = input('\\nIs your router on? (Enter Y or N): ')\n if router_on == 'Y':\n redlight = input(\n '\\nDoes your router emit a red light? (Enter Y or N): ')\n if redlight == 'Y':\n print(\n 'Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n comp_wifi_on = input(\n '\\nAre both your computer and wifi on? (Enter Y or N): ')\n if comp_wifi_on == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n \"If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. 
Goodbye!\"\n )\n else:\n print(\n 'Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 2:\n cable_on = input('\\nIs your cable box on? (Enter Y or N): ')\n if cable_on == 'Y':\n tv_on = input('\\nIs your TV on? (Enter Y or N): ')\n if tv_on == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 3:\n phones_on = input('\\nAre your phones on? (Enter Y or N): ')\n if phone_on == 'Y':\n landline_plugged = input(\n \"\"\"\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): \"\"\"\n )\n if landline_plugged == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your phones into the nearest outlet to turn them on. 
If you are using wireless phones, please make sure you change them before attempting to use them. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 4:\n late_payment = input(\n '\\nWere you late on your last payment? (Enter Y or N): ')\n if late_payment == 'Y':\n print(\n 'If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value == 5:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value < 1 or value > 5:\n print(\n 'You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n", "step-4": "# \"Time Warner Python\" Salma Hashem netid: sh5640\n#Design costumer service application by asking users series of questions, and based on the customers' answers to the questions, provide them with instructions. \n#Ask the user to choose from the following options \nprint(\"Choose from the following options: \")\n#assign each menu option to a number\none= \" 1. My internet is not working.\"\ntwo= \"2. My cable is not working.\"\nthree= \"3. My phones are not working.\"\nfour= \"4. My bill is wrong.\"\nfive= \"5. 
I want to upgrade my plan.\"\n#Print the options each on its own line and ask the user to input a number and convert into an integer\nprint(one, \"\\n\", two, \"\\n\", three, \"\\n\", four, \"\\n\", five)\nvalue= int(input(\"(Enter a value 1 - 5): \"))\n#assign variables to user inputs using if else statements for scenario one and print output based on user inputs \n\n\nif value==1:\n modem_on=input(\"\\nIs your modem on? (Enter Y or N): \")\n if modem_on==\"Y\":\n router_on=input(\"\\nIs your router on? (Enter Y or N): \")\n if router_on==\"Y\":\n redlight= input(\"\\nDoes your router emit a red light? (Enter Y or N): \")\n if redlight==\"Y\":\n print(\"Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n comp_wifi_on=input(\"\\nAre both your computer and wifi on? (Enter Y or N): \")\n if comp_wifi_on==\"Y\":\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n else:\n print(\"If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!\")\n \n else:\n print(\"Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!\")\n#assign variables to user inputs using if statements for scenario two and print output based on user inputs \nif value==2:\n cable_on=input(\"\\nIs your cable box on? 
(Enter Y or N): \")\n if cable_on==\"Y\":\n tv_on=input(\"\\nIs your TV on? (Enter Y or N): \")\n if tv_on==\"Y\":\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n else:\n print(\"Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!\")\n#assign variables to user inputs using if statements for scenario three and print output based on user inputs \nif value==3:\n phones_on=input(\"\\nAre your phones on? (Enter Y or N): \")\n if phone_on==\"Y\":\n landline_plugged=input(\"\\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): \")\n if landline_plugged==\"Y\":\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n else:\n print(\"Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"Plug your phones into the nearest outlet to turn them on. If you are using wireless phones, please make sure you change them before attempting to use them. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!\")\n#assign variables to user inputs using if statements for scenario four and print output based on user inputs\nif value==4:\n late_payment= input(\"\\nWere you late on your last payment? 
(Enter Y or N): \")\n if late_payment==\"Y\":\n print(\"If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n#scenario 5--evaluate input and print output based on user input\nif value==5:\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n#create if statements to evaluate invalid user inputs\nif value<1 or value>5:\n print(\"You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n \n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*- # @File :fi_handlers.py # @Author:ZengYu # @Date :2019/5/16 # @software:PyCharm import tornado.web import tornado.websocket from PIL import Image import base64 from model.flower_identify import flower_identify class FlowersInfo(): flowersInfo = ["月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。", "绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。", "万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。", "三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。", "石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。"] class FlowerIdentify(tornado.web.RequestHandler): def get(self): self.render("flower_identify.html") class IdentifyHandler(tornado.websocket.WebSocketHandler): def post(self): # 从JSON字符串读取图片数据 dataUrl = self.get_body_argument("image") Orientation = self.get_body_argument("orientation") # 得到图片方向以便旋转处理 content = base64.b64decode(dataUrl) '''保存到图片target.jpg''' file = open('./static/images/target.jpg', 'wb') file.write(content) file.close() '''图片旋转270(根据实际情况)''' img = Image.open('./static/images/target.jpg') if Orientation == "3": img = img.rotate(180, expand=True) elif Orientation == "6": img = img.rotate(270, expand=True) elif Orientation == "8": img = img.rotate(90, expand=True) img.save('./static/images/target.jpg') '''调用函数识别''' flowerIndex = flower_identify() # 调用识别函数 flowerInfo = FlowersInfo.flowersInfo[flowerIndex] # 
得到结果,并从FlowersInfo里找到该花的资料 self.render("fi_result.html", data=flowerInfo)
normal
{ "blob_id": "1c3b1776f14a085bec90be11028c87dc47f00293", "index": 1722, "step-1": "<mask token>\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n self.render('fi_result.html', data=flowerInfo)\n", "step-2": "<mask token>\n\n\nclass FlowersInfo:\n <mask token>\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n self.render('fi_result.html', data=flowerInfo)\n", 
"step-3": "<mask token>\n\n\nclass FlowersInfo:\n flowersInfo = [\n '月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。'\n ,\n '绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。'\n ,\n '万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。'\n ,\n '三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。'\n ,\n '石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。'\n ]\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n self.render('fi_result.html', data=flowerInfo)\n", "step-4": "import tornado.web\nimport tornado.websocket\nfrom PIL import Image\nimport 
base64\nfrom model.flower_identify import flower_identify\n\n\nclass FlowersInfo:\n flowersInfo = [\n '月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。'\n ,\n '绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。'\n ,\n '万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。'\n ,\n '三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。'\n ,\n '石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。'\n ]\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n self.render('fi_result.html', data=flowerInfo)\n", "step-5": "# -*- coding: utf-8 -*-\r\n# @File 
:fi_handlers.py\r\n# @Author:ZengYu\r\n# @Date :2019/5/16\r\n# @software:PyCharm\r\n\r\nimport tornado.web\r\nimport tornado.websocket\r\nfrom PIL import Image\r\nimport base64\r\nfrom model.flower_identify import flower_identify\r\n\r\nclass FlowersInfo():\r\n flowersInfo = [\"月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。\",\r\n \"绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。\",\r\n \"万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。\",\r\n \"三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。\",\r\n \"石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。\"]\r\n\r\nclass FlowerIdentify(tornado.web.RequestHandler):\r\n def get(self):\r\n self.render(\"flower_identify.html\")\r\n\r\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\r\n def post(self):\r\n # 从JSON字符串读取图片数据\r\n dataUrl = self.get_body_argument(\"image\")\r\n Orientation = self.get_body_argument(\"orientation\") # 得到图片方向以便旋转处理\r\n content = base64.b64decode(dataUrl)\r\n '''保存到图片target.jpg'''\r\n file = open('./static/images/target.jpg', 'wb')\r\n file.write(content)\r\n file.close()\r\n\r\n '''图片旋转270(根据实际情况)'''\r\n img = Image.open('./static/images/target.jpg')\r\n if Orientation == \"3\":\r\n img = img.rotate(180, expand=True)\r\n elif Orientation == \"6\":\r\n img = img.rotate(270, expand=True)\r\n elif Orientation == \"8\":\r\n img = img.rotate(90, expand=True)\r\n 
img.save('./static/images/target.jpg')\r\n\r\n '''调用函数识别'''\r\n flowerIndex = flower_identify() # 调用识别函数\r\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex] # 得到结果,并从FlowersInfo里找到该花的资料\r\n self.render(\"fi_result.html\", data=flowerInfo)\r\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
aax=int(input("enter aa-x")) aay=int(input("enter aa-y")) bbx=int(input("enter bb-x")) bby=int(input("enter bb-y")) ccx=int(input("enter cc-x")) ccy=int(input("enter cc-y")) ddx=int(input("enter dd-x")) ddy=int(input("enter dd-y")) if aax==aay and aay==bbx and bby==ccx and ccx==ccy and ccy==ddx and ddy==aax: print("yes") else: print("no")
normal
{ "blob_id": "bd0cc8cf059440f8fd7ad135894d82c9b18ebc80", "index": 4583, "step-1": "<mask token>\n", "step-2": "<mask token>\nif aax == aay and aay == bbx and bby == ccx and ccx == ccy and ccy == ddx and ddy == aax:\n print('yes')\nelse:\n print('no')\n", "step-3": "aax = int(input('enter aa-x'))\naay = int(input('enter aa-y'))\nbbx = int(input('enter bb-x'))\nbby = int(input('enter bb-y'))\nccx = int(input('enter cc-x'))\nccy = int(input('enter cc-y'))\nddx = int(input('enter dd-x'))\nddy = int(input('enter dd-y'))\nif aax == aay and aay == bbx and bby == ccx and ccx == ccy and ccy == ddx and ddy == aax:\n print('yes')\nelse:\n print('no')\n", "step-4": "aax=int(input(\"enter aa-x\"))\naay=int(input(\"enter aa-y\"))\nbbx=int(input(\"enter bb-x\"))\nbby=int(input(\"enter bb-y\"))\nccx=int(input(\"enter cc-x\"))\nccy=int(input(\"enter cc-y\"))\nddx=int(input(\"enter dd-x\"))\nddy=int(input(\"enter dd-y\"))\nif aax==aay and aay==bbx and bby==ccx and ccx==ccy and ccy==ddx and ddy==aax:\n print(\"yes\")\nelse:\n print(\"no\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.utils.html import strip_tags from django.core.mail import send_mail from django.urls import reverse from django.http import HttpResponseRedirect def Email(doctorFullName,password,otp,email,id): print("\n== UTILS ===") html_message=''' <html> <body> <p>Welcome %s and pass is %s and %d</p> <p>http://127.0.0.1:8000/varificationpage/%d<p> </body> </html> '''%(doctorFullName,password,otp,id) plain_message =strip_tags(html_message) send_mail("my subjects",plain_message,'[email protected]',[email],html_message=html_message) def emailpatient(firstname,lastname,password,otp,email,id): print("\n== UTILS ===") html_message=''' <html> <body> <p>Welcome %s %s and pass is %s and otp is %d</p> <p>http://127.0.0.1:8000/varificationpage/%d<p> </body> </html> '''%(firstname,lastname,password,otp,id) plain_message =strip_tags(html_message) send_mail("my subjects",plain_message,'[email protected]',[email],html_message=html_message) def forgotPassword(otp,email,id): email_subject = "This is your new OTP" print("\n== UTILS ===") html_message=''' <html> <body> <p>Welcome %s Your Otp is %d </p> <p>http://127.0.0.1:8000/forgetpwdvarification/%d<p> </body> </html> '''%(email,otp,id) print(otp) plain_message =strip_tags(html_message) send_mail("my subjects",plain_message,'[email protected]',[email],html_message=html_message) # return HttpResponseRedirect(reverse(login)) # link = "https://localhost:8000/example?email="+email+"&otp="+otp+"&random="+random # send_mail(email_subject, 'mail_template','[email protected]', [email], {'otp': otp})
normal
{ "blob_id": "4ecf9c03750a31ecd113a7548df4e2a700e775e0", "index": 4034, "step-1": "<mask token>\n\n\ndef emailpatient(firstname, lastname, password, otp, email, id):\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s %s and pass is %s and otp is %d</p>\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\n </body>\n </html>\n \"\"\"\n % (firstname, lastname, password, otp, id))\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef Email(doctorFullName, password, otp, email, id):\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s and pass is %s and %d</p>\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\n </body>\n </html>\n \"\"\"\n % (doctorFullName, password, otp, id))\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n\n\ndef emailpatient(firstname, lastname, password, otp, email, id):\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s %s and pass is %s and otp is %d</p>\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\n </body>\n </html>\n \"\"\"\n % (firstname, lastname, password, otp, id))\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef Email(doctorFullName, password, otp, email, id):\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s and pass is %s and %d</p>\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\n </body>\n </html>\n \"\"\"\n % (doctorFullName, password, otp, id))\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n\n\ndef 
emailpatient(firstname, lastname, password, otp, email, id):\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s %s and pass is %s and otp is %d</p>\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\n </body>\n </html>\n \"\"\"\n % (firstname, lastname, password, otp, id))\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n\n\ndef forgotPassword(otp, email, id):\n email_subject = 'This is your new OTP'\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s Your Otp is %d </p>\n <p>http://127.0.0.1:8000/forgetpwdvarification/%d<p>\n </body>\n </html>\n \"\"\"\n % (email, otp, id))\n print(otp)\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n", "step-4": "from django.utils.html import strip_tags\nfrom django.core.mail import send_mail\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef Email(doctorFullName, password, otp, email, id):\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s and pass is %s and %d</p>\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\n </body>\n </html>\n \"\"\"\n % (doctorFullName, password, otp, id))\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n\n\ndef emailpatient(firstname, lastname, password, otp, email, id):\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s %s and pass is %s and otp is %d</p>\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\n </body>\n </html>\n \"\"\"\n % (firstname, lastname, password, otp, id))\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n\n\ndef 
forgotPassword(otp, email, id):\n email_subject = 'This is your new OTP'\n print('\\n== UTILS ===')\n html_message = (\n \"\"\"\n <html>\n <body>\n <p>Welcome %s Your Otp is %d </p>\n <p>http://127.0.0.1:8000/forgetpwdvarification/%d<p>\n </body>\n </html>\n \"\"\"\n % (email, otp, id))\n print(otp)\n plain_message = strip_tags(html_message)\n send_mail('my subjects', plain_message,\n '[email protected]', [email], html_message=html_message)\n", "step-5": "from django.utils.html import strip_tags\r\nfrom django.core.mail import send_mail\r\nfrom django.urls import reverse\r\nfrom django.http import HttpResponseRedirect\r\n\r\ndef Email(doctorFullName,password,otp,email,id):\r\n print(\"\\n== UTILS ===\")\r\n html_message='''\r\n <html>\r\n <body>\r\n <p>Welcome %s and pass is %s and %d</p>\r\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\r\n </body>\r\n </html>\r\n '''%(doctorFullName,password,otp,id)\r\n plain_message =strip_tags(html_message)\r\n send_mail(\"my subjects\",plain_message,'[email protected]',[email],html_message=html_message)\r\ndef emailpatient(firstname,lastname,password,otp,email,id):\r\n print(\"\\n== UTILS ===\")\r\n html_message='''\r\n <html>\r\n <body>\r\n <p>Welcome %s %s and pass is %s and otp is %d</p>\r\n <p>http://127.0.0.1:8000/varificationpage/%d<p>\r\n </body>\r\n </html>\r\n '''%(firstname,lastname,password,otp,id)\r\n plain_message =strip_tags(html_message)\r\n send_mail(\"my subjects\",plain_message,'[email protected]',[email],html_message=html_message)\r\n\r\n \r\ndef forgotPassword(otp,email,id):\r\n email_subject = \"This is your new OTP\"\r\n print(\"\\n== UTILS ===\")\r\n html_message='''\r\n <html>\r\n <body>\r\n <p>Welcome %s Your Otp is %d </p>\r\n <p>http://127.0.0.1:8000/forgetpwdvarification/%d<p>\r\n </body>\r\n </html>\r\n '''%(email,otp,id)\r\n print(otp)\r\n plain_message =strip_tags(html_message)\r\n send_mail(\"my subjects\",plain_message,'[email protected]',[email],html_message=html_message)\r\n # return 
HttpResponseRedirect(reverse(login))\r\n # link = \"https://localhost:8000/example?email=\"+email+\"&otp=\"+otp+\"&random=\"+random\r\n # send_mail(email_subject, 'mail_template','[email protected]', [email], {'otp': otp})", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]