code (string, lengths 13–1.2M) | order_type (string, 1 distinct value) | original_example (dict) | step_ids (list, lengths 1–5) |
---|---|---|---|
primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
# numbers between (8 - 26) and (49 - 49)
intervalo = list(range(8, 27)) + list(range(49, 50))
is_magic = []
for n in primos:
    quadrado = n ** 2
    if quadrado in intervalo:
        is_magic.append(quadrado)
print(len(is_magic)) # 3
|
normal
|
{
"blob_id": "b7f443521e165f327aae9ff5d7bbb7b8462abeb5",
"index": 2890,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\nprint(len(is_magic))\n",
"step-3": "primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\nintervalo = list(range(8, 27)) + list(range(49, 50))\nis_magic = []\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\nprint(len(is_magic))\n",
"step-4": "primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n# números entre (8 - 26) e (44 - 44)\nintervalo = list(range(8, 27)) + list(range(49, 50))\nis_magic = []\nfor n in primos:\n quadrado = n ** 2\n if quadrado in intervalo:\n is_magic.append(quadrado)\n\nprint(len(is_magic)) # 3",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Cesar Sinchiguano <[email protected]>
#
# Distributed under terms of the BSD license.
"""
"""
import numpy as np
from open3d import *
def main():
    print("Load a ply point cloud, print it, and render it")
    pcd = read_point_cloud("11.ply")
    ''' read_point_cloud reads a point cloud from a file.
    It tries to decode the file based on the extension name.
    The supported extension names are: pcd, ply, xyz, xyzrgb, xyzn, pts.'''
    pcd = read_point_cloud("TestData/fragment.ply")
    print(pcd)
    # print("Load a ply point cloud, print it, and render it")
    # pcd = read_point_cloud("bun0.pcd")
    # print(pcd)
    tmp=np.asarray(pcd.points)
    print(tmp[0:5,0:3])#rows and column
    #draw_geometries([pcd])
    print("Downsample the point cloud with a voxel of 0.005")
    downpcd = voxel_down_sample(pcd, voxel_size = 0.05)
    draw_geometries([downpcd])
    # print("Recompute the normal of the downsampled point cloud")
    estimate_normals(downpcd, search_param = KDTreeSearchParamHybrid(radius = 0.1, max_nn = 30))
    draw_geometries([downpcd])
    # print("Print a normal vector of the 0th point")
    # print(downpcd.normals[0])
    # print("Print the normal vectors of the first 10 points")
    # print(np.asarray(downpcd.normals)[:10,:])
    # print("")
    print("Load a polygon volume and use it to crop the original point cloud")
    vol = read_selection_polygon_volume("TestData/Crop/cropped.json")
    chair = vol.crop_point_cloud(pcd)
    #draw_geometries([chair])
    print("")
    print("Paint chair")
    chair.paint_uniform_color([1, 0.706, 0])
    #draw_geometries([chair])
    print("")
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "30e8e269cf6500ab804566a85c9b96b3ef9bda36",
"index": 4143,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n print('Load a ply point cloud, print it, and render it')\n pcd = read_point_cloud('11.ply')\n \"\"\" read_point_cloud reads a point cloud from a file.\n It tries to decode the file based on the extension name.\n The supported extension names are: pcd, ply, xyz, xyzrgb, xyzn, pts.\"\"\"\n pcd = read_point_cloud('TestData/fragment.ply')\n print(pcd)\n tmp = np.asarray(pcd.points)\n print(tmp[0:5, 0:3])\n print('Downsample the point cloud with a voxel of 0.005')\n downpcd = voxel_down_sample(pcd, voxel_size=0.05)\n draw_geometries([downpcd])\n estimate_normals(downpcd, search_param=KDTreeSearchParamHybrid(radius=\n 0.1, max_nn=30))\n draw_geometries([downpcd])\n print('Load a polygon volume and use it to crop the original point cloud')\n vol = read_selection_polygon_volume('TestData/Crop/cropped.json')\n chair = vol.crop_point_cloud(pcd)\n print('')\n print('Paint chair')\n chair.paint_uniform_color([1, 0.706, 0])\n print('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n print('Load a ply point cloud, print it, and render it')\n pcd = read_point_cloud('11.ply')\n \"\"\" read_point_cloud reads a point cloud from a file.\n It tries to decode the file based on the extension name.\n The supported extension names are: pcd, ply, xyz, xyzrgb, xyzn, pts.\"\"\"\n pcd = read_point_cloud('TestData/fragment.ply')\n print(pcd)\n tmp = np.asarray(pcd.points)\n print(tmp[0:5, 0:3])\n print('Downsample the point cloud with a voxel of 0.005')\n downpcd = voxel_down_sample(pcd, voxel_size=0.05)\n draw_geometries([downpcd])\n estimate_normals(downpcd, search_param=KDTreeSearchParamHybrid(radius=\n 0.1, max_nn=30))\n draw_geometries([downpcd])\n print('Load a polygon volume and use it to crop the original point cloud')\n vol = read_selection_polygon_volume('TestData/Crop/cropped.json')\n chair = vol.crop_point_cloud(pcd)\n print('')\n print('Paint chair')\n chair.paint_uniform_color([1, 0.706, 0])\n print('')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nfrom open3d import *\n\n\ndef main():\n print('Load a ply point cloud, print it, and render it')\n pcd = read_point_cloud('11.ply')\n \"\"\" read_point_cloud reads a point cloud from a file.\n It tries to decode the file based on the extension name.\n The supported extension names are: pcd, ply, xyz, xyzrgb, xyzn, pts.\"\"\"\n pcd = read_point_cloud('TestData/fragment.ply')\n print(pcd)\n tmp = np.asarray(pcd.points)\n print(tmp[0:5, 0:3])\n print('Downsample the point cloud with a voxel of 0.005')\n downpcd = voxel_down_sample(pcd, voxel_size=0.05)\n draw_geometries([downpcd])\n estimate_normals(downpcd, search_param=KDTreeSearchParamHybrid(radius=\n 0.1, max_nn=30))\n draw_geometries([downpcd])\n print('Load a polygon volume and use it to crop the original point cloud')\n vol = read_selection_polygon_volume('TestData/Crop/cropped.json')\n chair = vol.crop_point_cloud(pcd)\n print('')\n print('Paint chair')\n chair.paint_uniform_color([1, 0.706, 0])\n print('')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2018 Cesar Sinchiguano <[email protected]>\n#\n# Distributed under terms of the BSD license.\n\n\"\"\"\n\n\"\"\"\nimport numpy as np\nfrom open3d import *\n\ndef main():\n print(\"Load a ply point cloud, print it, and render it\")\n pcd = read_point_cloud(\"11.ply\")\n ''' read_point_cloud reads a point cloud from a file.\n It tries to decode the file based on the extension name.\n The supported extension names are: pcd, ply, xyz, xyzrgb, xyzn, pts.'''\n pcd = read_point_cloud(\"TestData/fragment.ply\")\n\n print(pcd)\n\n # print(\"Load a ply point cloud, print it, and render it\")\n # pcd = read_point_cloud(\"bun0.pcd\")\n # print(pcd)\n tmp=np.asarray(pcd.points)\n print(tmp[0:5,0:3])#rows and column\n #draw_geometries([pcd])\n\n print(\"Downsample the point cloud with a voxel of 0.005\")\n downpcd = voxel_down_sample(pcd, voxel_size = 0.05)\n draw_geometries([downpcd])\n\n # print(\"Recompute the normal of the downsampled point cloud\")\n estimate_normals(downpcd, search_param = KDTreeSearchParamHybrid(radius = 0.1, max_nn = 30))\n draw_geometries([downpcd])\n\n # print(\"Print a normal vector of the 0th point\")\n # print(downpcd.normals[0])\n # print(\"Print the normal vectors of the first 10 points\")\n # print(np.asarray(downpcd.normals)[:10,:])\n # print(\"\")\n\n print(\"Load a polygon volume and use it to crop the original point cloud\")\n vol = read_selection_polygon_volume(\"TestData/Crop/cropped.json\")\n chair = vol.crop_point_cloud(pcd)\n #draw_geometries([chair])\n print(\"\")\n\n print(\"Paint chair\")\n chair.paint_uniform_color([1, 0.706, 0])\n #draw_geometries([chair])\n print(\"\")\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
target = []
with open('IntegerArray.txt', 'r') as f:
    target = f.readlines()
for x in range(len(target)):
    target[x] = int(target[x])
def f(A):
    if len(A) == 1:
        return 0
    else:
        rightStart = len(A) // 2
        leftArray = A[0:rightStart]
        righArray = A[rightStart:]
        B, b = count_and_sort(leftArray)
        C, c = count_and_sort(righArray)
        D, d = count_and_sort_split(B, C)
        return b + c + d
def count_and_sort(A):
    if len(A) == 1:
        return A, 0
    elif len(A) == 2:
        if A[0] < A[1]:
            return A, 0
        else:
            temp = A[0]
            A[0] = A[1]
            A[1] = temp
            return A, 1
    else:
        rightStart = len(A) // 2
        leftArray = A[0:rightStart]
        righArray = A[rightStart:]
        B, b = count_and_sort(leftArray)
        C, c = count_and_sort(righArray)
        D, d = count_and_sort_split(B, C)
        return D, b + c + d
def count_and_sort_split(B, C):
    result = []
    nums = 0
    i = 0
    j = 0
    while i < len(B) or j < len(C):
        if i >= len(B):
            result = result + C[j:]
            break
        elif j >= len(C):
            result = result + B[i:]
            break
        if B[i] < C[j]:
            result.append(B[i])
            i += 1
        elif B[i] > C[j]:
            result.append(C[j])
            nums = nums + len(B[i:])
            j += 1
    return result, nums
print(f(target))
|
normal
|
{
"blob_id": "b5611c668a40e1735c92d6d00867885023ad713f",
"index": 248,
"step-1": "<mask token>\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\n<mask token>\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\ndef count_and_sort(A):\n if len(A) == 1:\n return A, 0\n elif len(A) == 2:\n if A[0] < A[1]:\n return A, 0\n else:\n temp = A[0]\n A[0] = A[1]\n A[1] = temp\n return A, 1\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return D, b + c + d\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open('IntegerArray.txt', 'r') as f:\n target = f.readlines()\nfor x in range(len(target)):\n target[x] = int(target[x])\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\ndef count_and_sort(A):\n if len(A) == 1:\n return A, 0\n elif len(A) == 2:\n if A[0] < A[1]:\n return A, 0\n else:\n temp = A[0]\n A[0] = A[1]\n A[1] = temp\n return A, 1\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return D, b + c + d\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\nprint(f(target))\n",
"step-4": "target = []\nwith open('IntegerArray.txt', 'r') as f:\n target = f.readlines()\nfor x in range(len(target)):\n target[x] = int(target[x])\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\ndef count_and_sort(A):\n if len(A) == 1:\n return A, 0\n elif len(A) == 2:\n if A[0] < A[1]:\n return A, 0\n else:\n temp = A[0]\n A[0] = A[1]\n A[1] = temp\n return A, 1\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return D, b + c + d\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\nprint(f(target))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# Name: CreateDatabase.py
# Description: Connects to a point in time in the geodatabase in
# PostgreSQL using database authentication.
# Import system modules
import arcpy
import os
arcpy.env.workspace="Database Connections"
if arcpy.Exists ("Prueba6.sde")==False:
    arcpy.CreateDatabaseConnection_management("Database Connections",
                                              "Prueba6.sde",
                                              "SQL_SERVER",
                                              "192.168.200.250",
                                              "DATABASE_AUTH",
                                              "sde",
                                              "$deDEs4Rr0lLo",
                                              "#",
                                              "sprueba",
                                              "#",
                                              "#",
                                              "#",
                                              "#")
#arcpy.ListUsers(conection_sde)
#print arcpy.ListFeatureClasses()
prueba = "Prueba6.sde"
desc= arcpy.Describe("Prueba6.sde")
print desc.name
arcpy.env.workspace = r"Database Connections/Prueba6.sde"
desc= arcpy.Describe("sprueba.DBO.base_limites_nacionales")
print desc.name
#datasets=arcpy.ListDatasets()
arcpy.env.workspace = r"Database Connections/Prueba6.sde/sprueba.DBO.base_limites_nacionales"
desc= arcpy.Describe("sprueba.DBO.departamentos")
print desc.name
#arcpy.AddField_management("sprueba.DBO.departamentos","limites_buffer","TEXT","10")
#arcpy.Buffer_analysis("sprueba.DBO.departamentos","sprueba.DBO.departamentos_buffer",'10 miles')
inFeatures = "sprueba.DBO.departamentos"
fieldName1 = "xCentroid"
fieldName2 = "yCentroid"
fieldPrecision = 18
fieldScale = 11
# Add fields
arcpy.AddField_management(inFeatures, fieldName1, "DOUBLE",
                          fieldPrecision, fieldScale)
arcpy.AddField_management(inFeatures, fieldName2, "DOUBLE",
                          fieldPrecision, fieldScale)
# Calculate centroid
arcpy.CalculateField_management(inFeatures, fieldName1,
                                "!SHAPE.CENTROID.X!",
                                "PYTHON_9.3")
arcpy.CalculateField_management(inFeatures, fieldName2,
                                "!SHAPE.CENTROID.Y!",
                                "PYTHON_9.3")
|
normal
|
{
"blob_id": "6e98dfd758700c57ddbb17624472ce2c23cbee6a",
"index": 2036,
"step-1": "# Name: CreateDatabase.py\n# Description: Connects to a point in time in the geodatabase in\n# PostgreSQL using database authentication.\n\n# Import system modules\nimport arcpy\nimport os\n\n\n\n\narcpy.env.workspace=\"Database Connections\"\nif arcpy.Exists (\"Prueba6.sde\")==False:\n\n arcpy.CreateDatabaseConnection_management(\"Database Connections\",\n \"Prueba6.sde\",\n \"SQL_SERVER\",\n \"192.168.200.250\",\n \"DATABASE_AUTH\",\n \"sde\",\n \"$deDEs4Rr0lLo\",\n \"#\",\n \"sprueba\",\n \"#\",\n \"#\",\n \"#\",\n \"#\")\n\n\n#arcpy.ListUsers(conection_sde)\n\n#print arcpy.ListFeatureClasses()\n\nprueba = \"Prueba6.sde\"\ndesc= arcpy.Describe(\"Prueba6.sde\")\nprint desc.name\n\n\n\n\narcpy.env.workspace = r\"Database Connections/Prueba6.sde\"\n\ndesc= arcpy.Describe(\"sprueba.DBO.base_limites_nacionales\")\n\nprint desc.name\n\n\n#datasets=arcpy.ListDatasets()\n\n\narcpy.env.workspace = r\"Database Connections/Prueba6.sde/sprueba.DBO.base_limites_nacionales\"\n\ndesc= arcpy.Describe(\"sprueba.DBO.departamentos\")\n\nprint desc.name\n\n#arcpy.AddField_management(\"sprueba.DBO.departamentos\",\"limites_buffer\",\"TEXT\",\"10\")\n#arcpy.Buffer_analysis(\"sprueba.DBO.departamentos\",\"sprueba.DBO.departamentos_buffer\",'10 miles')\n\ninFeatures = \"sprueba.DBO.departamentos\"\nfieldName1 = \"xCentroid\"\nfieldName2 = \"yCentroid\"\nfieldPrecision = 18\nfieldScale = 11\n\n\n# Add fields\narcpy.AddField_management(inFeatures, fieldName1, \"DOUBLE\",\n fieldPrecision, fieldScale)\narcpy.AddField_management(inFeatures, fieldName2, \"DOUBLE\",\n fieldPrecision, fieldScale)\n\n# Calculate centroid\narcpy.CalculateField_management(inFeatures, fieldName1,\n \"!SHAPE.CENTROID.X!\",\n \"PYTHON_9.3\")\narcpy.CalculateField_management(inFeatures, fieldName2,\n \"!SHAPE.CENTROID.Y!\",\n \"PYTHON_9.3\")\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import sys
sys.stdin = open('retire.txt', 'r')
def counseling(pay, row):
    global max_sum
    if row == N - 1:
        if arr[row][0] == 1:
            pay += arr[row][1]
        max_sum = max(pay, max_sum)
        return
    if row == N:
        max_sum = max(pay, max_sum)
        return
    if row > N - 1:
        return
    counseling(pay + arr[row][1], row + arr[row][0])
    counseling(pay, row + 1)
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
# visit = [0] * N
max_sum = 0
counseling(0, 0)
print(max_sum)
|
normal
|
{
"blob_id": "9db2377f15aaf28373959dad88c6ec7b6dacffd2",
"index": 9512,
"step-1": "<mask token>\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\n<mask token>\ncounseling(0, 0)\nprint(max_sum)\n",
"step-3": "<mask token>\nsys.stdin = open('retire.txt', 'r')\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\nmax_sum = 0\ncounseling(0, 0)\nprint(max_sum)\n",
"step-4": "import sys\nsys.stdin = open('retire.txt', 'r')\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\nmax_sum = 0\ncounseling(0, 0)\nprint(max_sum)\n",
"step-5": "import sys\nsys.stdin = open('retire.txt', 'r')\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\n# visit = [0] * N\nmax_sum = 0\ncounseling(0, 0)\nprint(max_sum)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
import argparse
import contextlib
import os.path
import shutil
import subprocess
import sys
import tempfile
from Bio import SeqIO
BOOTSTRAP_MODES = 'a',
# Some utilities
@contextlib.contextmanager
def sequences_in_format(sequences, fmt='fasta', **kwargs):
    with tempfile.NamedTemporaryFile(**kwargs) as tf:
        SeqIO.write(sequences, tf, fmt)
        tf.flush()
        yield tf.name
@contextlib.contextmanager
def temp_dir(**kwargs):
    """Maintains a temporary directory for the life of the context manager."""
    temp_dir = tempfile.mkdtemp(**kwargs)
    try:
        yield temp_dir
    finally:
        # Cleanup
        # ermm... this is breaking something (maybe bootstrapping replicates?), so leaving out for now
        #shutil.rmtree(temp_dir)
        pass
def stripext(f, basename=False):
    if basename:
        return stripext(os.path.basename(f))
    return os.path.splitext(f)[0]
def nonextant_file(path):
    if os.path.exists(path):
        raise ValueError("Exists: " + path)
    return path
def joiner(base_path):
    def p(*args):
        return os.path.join(base_path, *args)
    return p
def move(x, y):
    subprocess.check_output(['cp', x, y])
def raxml(sequences, output_tree, stats_path=None, log_path=None, quiet=False,
          executable='raxmlHPC-SSE3', model='GTRGAMMA', threads=None,
          rapid_bootstrap=None, bootstrap_seed=None, tmp_prefix=None, outgroup=None):
    name = os.path.basename(os.path.splitext(output_tree)[0])
    def f(n):
        "Gets the RAxML file name associated with a key"
        return 'RAxML_{1}.{0}'.format(name, n)
    with temp_dir(prefix='raxml-') as td:
        with sequences_in_format(sequences, fmt='phylip-relaxed',
                                 prefix=tmp_prefix, dir=td) as seq_file:
            p = joiner(td)
            # note: -p is needed for some reason now but didn't use to be?
            cmd = [executable, '-n', name, '-m', model, '-s', seq_file, '-p', '9988']
            if threads and threads > 1:
                cmd.extend(('-T', str(threads)))
            if rapid_bootstrap:
                cmd.extend(('-f', 'a', '-x', bootstrap_seed,
                            '-N', rapid_bootstrap))
            if outgroup:
                cmd.extend(('-o', outgroup))
            stdout = stderr = None
            if quiet:
                stdout = stderr = open(os.path.devnull)
            cmd = map(str, cmd)
            print >> sys.stderr, "Running:", ' '.join(cmd)
            try:
                subprocess.check_call(cmd, stdout=stdout, stderr=stderr, cwd=td)
            except subprocess.CalledProcessError, e:
                raise SystemExit(e.returncode)
            # Get the result - either bootstrap-annotated tree or result
            key = 'bipartitions' if rapid_bootstrap else 'result'
            move(p(f(key)), output_tree)
            if stats_path:
                move(p(f('info')), stats_path)
            if log_path:
                move(p(f('log')), log_path)
def main():
    parser = argparse.ArgumentParser(description="""Simple wrapper around
    RAxML. Abstracts executable selection and sequence formatting; only keeps
    desired files; name specification. Most arguments are *not* supported""")
    parser.add_argument('alignment_file', type=argparse.FileType('r'),
                        help="""Input alignment""")
    parser.add_argument('--input-format', default='fasta',
                        help="""Format of input file [default: %(default)s]""")
    parser.add_argument('output_tree', type=nonextant_file, help="""Destination
    for output tree""")
    parser.add_argument('--stats', type=nonextant_file, metavar="<stats file>",
                        help="""Save RAxML stats to <stats file>""")
    parser.add_argument('--log', type=nonextant_file, metavar="<log file>",
                        help="""Write RAxML log file to <log file>""")
    parser.add_argument('-q', '--quiet', action='store_true',
                        help="""Suppress output""")
    bs_group = parser.add_argument_group("Bootstrap Options")
    bs_group.add_argument('--rapid-bootstrap', metavar='N',
                          help="""Run rapid bootstrap analysis with N replicates""",
                          type=int)
    bs_group.add_argument('-x', '--bootstrap-seed', help="""Bootstrap seed""",
                          dest='bootstrap_seed', type=int, default=1)
    rax_group = parser.add_argument_group(title="""RAxML options""")
    rax_group.add_argument('-T', '--threads', help="""Number of
    threads to use [default: 1]""", type=int)
    rax_group.add_argument('--executable', help="""RAxML executable to use.
    [default: raxmlHPC-PTHREADS-SSE3 if threads > 1, raxmlHPC-SSE3
    otherwise]""")
    rax_group.add_argument('-m', '--model', default='GTRGAMMA', help="""RAxML
    model to use [default: %(default)s]""")
    parser.add_argument('-o', '--outgroup',
                        help="""Fix output for tree""")
    args = parser.parse_args()
    if not args.executable:
        args.executable = ('raxmlHPC-PTHREADS-SSE3' if args.threads else
                           'raxmlHPC-SSE3')
    with args.alignment_file as fp:
        sequences = SeqIO.parse(fp, args.input_format)
        raxml(sequences, args.output_tree, executable=args.executable,
              stats_path=args.stats, quiet=args.quiet,
              threads=args.threads, model=args.model, log_path=args.log,
              rapid_bootstrap=args.rapid_bootstrap,
              bootstrap_seed=args.bootstrap_seed,
              outgroup=args.outgroup,
              tmp_prefix=stripext(fp.name, True))
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "28532fe798b6a764bec7ea511ba9e66a1d096b6f",
"index": 9364,
"step-1": "#!/usr/bin/python\n\nimport argparse\nimport contextlib\nimport os.path\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom Bio import SeqIO\n\nBOOTSTRAP_MODES = 'a',\n\n# Some utilities\[email protected]\ndef sequences_in_format(sequences, fmt='fasta', **kwargs):\n with tempfile.NamedTemporaryFile(**kwargs) as tf:\n SeqIO.write(sequences, tf, fmt)\n tf.flush()\n yield tf.name\n\[email protected]\ndef temp_dir(**kwargs):\n \"\"\"Maintains a temporary directory for the life of the context manager.\"\"\"\n temp_dir = tempfile.mkdtemp(**kwargs)\n try:\n yield temp_dir\n finally:\n # Cleanup\n # ermm... this is breaking something (maybe bootstrapping replicates?), so leaving out for now\n #shutil.rmtree(temp_dir)\n pass\n\ndef stripext(f, basename=False):\n if basename:\n return stripext(os.path.basename(f))\n return os.path.splitext(f)[0]\n\ndef nonextant_file(path):\n if os.path.exists(path):\n raise ValueError(\"Exists: \" + path)\n\n return path\n\ndef joiner(base_path):\n def p(*args):\n return os.path.join(base_path, *args)\n return p\n\ndef move(x, y):\n subprocess.check_output(['cp', x, y])\n\ndef raxml(sequences, output_tree, stats_path=None, log_path=None, quiet=False,\n executable='raxmlHPC-SSE3', model='GTRGAMMA', threads=None,\n rapid_bootstrap=None, bootstrap_seed=None, tmp_prefix=None, outgroup=None):\n name = os.path.basename(os.path.splitext(output_tree)[0])\n\n def f(n):\n \"Gets the RAxML file name associated with a key\"\n return 'RAxML_{1}.{0}'.format(name, n)\n\n with temp_dir(prefix='raxml-') as td:\n with sequences_in_format(sequences, fmt='phylip-relaxed',\n prefix=tmp_prefix, dir=td) as seq_file:\n p = joiner(td)\n\n # note: -p is needed for some reason now but didn't use to be?\n cmd = [executable, '-n', name, '-m', model, '-s', seq_file, '-p', '9988']\n if threads and threads > 1:\n cmd.extend(('-T', str(threads)))\n\n if rapid_bootstrap:\n cmd.extend(('-f', 'a', '-x', bootstrap_seed,\n '-N', rapid_bootstrap))\n\n if outgroup:\n cmd.extend(('-o', outgroup))\n\n stdout = stderr = None\n if quiet:\n stdout = stderr = open(os.path.devnull)\n\n cmd = map(str, cmd)\n\n print >> sys.stderr, \"Running:\", ' '.join(cmd)\n try:\n subprocess.check_call(cmd, stdout=stdout, stderr=stderr, cwd=td)\n except subprocess.CalledProcessError, e:\n raise SystemExit(e.returncode)\n\n # Get the result - either bootstrap-annotated tree or result\n key = 'bipartitions' if rapid_bootstrap else 'result'\n move(p(f(key)), output_tree)\n\n if stats_path:\n move(p(f('info')), stats_path)\n if log_path:\n move(p(f('log')), log_path)\n\ndef main():\n parser = argparse.ArgumentParser(description=\"\"\"Simple wrapper around\n RAxML. Abstracts executable selection and sequence formatting; only keeps\n desired files; name specification. 
Most arguments are *not* supported\"\"\")\n parser.add_argument('alignment_file', type=argparse.FileType('r'),\n help=\"\"\"Input alignment\"\"\")\n parser.add_argument('--input-format', default='fasta',\n help=\"\"\"Format of input file [default: %(default)s]\"\"\")\n parser.add_argument('output_tree', type=nonextant_file, help=\"\"\"Destination\n for output tree\"\"\")\n parser.add_argument('--stats', type=nonextant_file, metavar=\"<stats file>\",\n help=\"\"\"Save RAxML stats to <stats file>\"\"\")\n parser.add_argument('--log', type=nonextant_file, metavar=\"<log file>\",\n help=\"\"\"Write RAxML log file to <log file>\"\"\")\n parser.add_argument('-q', '--quiet', action='store_true',\n help=\"\"\"Suppress output\"\"\")\n\n bs_group = parser.add_argument_group(\"Bootstrap Options\")\n bs_group.add_argument('--rapid-bootstrap', metavar='N',\n help=\"\"\"Run rapid bootstrap analysis with N replicates\"\"\",\n type=int)\n bs_group.add_argument('-x', '--bootstrap-seed', help=\"\"\"Bootstrap seed\"\"\",\n dest='bootstrap_seed', type=int, default=1)\n\n rax_group = parser.add_argument_group(title=\"\"\"RAxML options\"\"\")\n rax_group.add_argument('-T', '--threads', help=\"\"\"Number of\n threads to use [default: 1]\"\"\", type=int)\n rax_group.add_argument('--executable', help=\"\"\"RAxML executable to use.\n [default: raxmlHPC-PTHREADS-SSE3 if threads > 1, raxmlHPC-SSE3\n otherwise]\"\"\")\n rax_group.add_argument('-m', '--model', default='GTRGAMMA', help=\"\"\"RAxML\n model to use [default: %(default)s]\"\"\")\n parser.add_argument('-o', '--outgroup',\n help=\"\"\"Fix output for tree\"\"\")\n\n args = parser.parse_args()\n if not args.executable:\n args.executable = ('raxmlHPC-PTHREADS-SSE3' if args.threads else\n 'raxmlHPC-SSE3')\n\n with args.alignment_file as fp:\n sequences = SeqIO.parse(fp, args.input_format)\n raxml(sequences, args.output_tree, executable=args.executable,\n stats_path=args.stats, quiet=args.quiet,\n threads=args.threads, model=args.model, log_path=args.log,\n rapid_bootstrap=args.rapid_bootstrap,\n bootstrap_seed=args.bootstrap_seed,\n outgroup=args.outgroup,\n tmp_prefix=stripext(fp.name, True))\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import (
    A1,
    A,
)
__all__ = [
    "A1",
    "A",
]
|
normal
|
{
"blob_id": "846a42a997539a45576d3ecbe0bd290e00b55935",
"index": 3258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['A1', 'A']\n",
"step-3": "from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import A1, A\n__all__ = ['A1', 'A']\n",
"step-4": "from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import (\n A1,\n A,\n)\n\n__all__ = [\n \"A1\",\n \"A\",\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
# type defining of the variable and playing with variables.
a = 5.0
print(id(a))
a = 10
print("hello.....")
print(type(a))
print(id(a))
# locating addresses...
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
# Strings...
name = input("Enter Your Name:: ") # iNPUTTING AS NAME
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
# Escape Sequence
# \'
# \"
# \\
# \n
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programmin"""
print(message)
# string Concatenation
lastname = input("Enter Your Last Name:: ") # iNPUTTING AS NAME
print(lastname)
print(name + " " + lastname)
full = f"{name} {lastname}"
print("Another way of writing... \n" + full)
print(full.upper()) # converts into upper case.
print(full.find("ip")) # finding location of specific char. Returns index number.
print("Dipesh" in full) # returns Boolean value either true or false..
print("Patel" in full)
print(full.replace("Rafaliya", "Patel"))
# Binary representation of any number...
print(bin(a)) # binary of a = 10
print(hex(a)) # Hexadecimal of a..
x = 0b0101
print((x)) # binary num a
print(bin(x)) # binary printing of a
# complex Number...
complex = a + 5j
print(complex) # printing complex number
y = 3
# operations
q = a + y # addition
print(q)
w = a - y # substraction
print(w)
e = a * y # multiplication
print(e)
r = a / y # division
print(r)
t = a // y # division but only print integer value
print(t)
g = a ** y # to the power of
print(g)
m = a % y # remainder
print(m)
# constants variables..
PI = 3.14 # this is a var with a constant value
print(abs(PI)) # absolute value of PI
print(round(PI)) # round up value of PI
no = -8.56
print(math.floor(no)) # floor value of no
print(math.ceil(no)) # ceiling value of no
# if-elif-else loop
age = 10
if age >= 21:
    print("Adult")
elif age >= 13:
    print("Teenager")
else:
    print("Child")
# ternary operator
print("Adult" if age >= 21 else "Teenager")
# for loops
for p in "Dipesh":
    print(p)
for l in range(0, 10, 2): # range is a kind of list...
    print(l)
answer = 10
guess = 1
while answer != guess: # while loop for guessing
    guess = int(input("Enter your Guess:: "))
else:
    pass # this is used to break the loop...
# defining a function ... Number is even or odd..
def evenodd(numb):
    if numb % 2 == 0:
        return "even"
    else:
        return "odd"
print("The Number is " + evenodd(20))
# printing the row at a time...
def rows(**ro):
    print(ro)
rows(name="Dipesh", id=1)
|
normal
|
{
"blob_id": "95b75395cafc6ba9f75ecf48157421e37ced2518",
"index": 815,
"step-1": "<mask token>\n\n\ndef rows(**ro):\n print(ro)\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(id(a))\n<mask token>\nprint('hello.....')\nprint(type(a))\nprint(id(a))\n<mask token>\nprint(id(b))\nb.append(10)\nprint(id(b))\n<mask token>\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\n<mask token>\nprint(message)\n<mask token>\nprint(message)\n<mask token>\nprint(lastname)\nprint(name + ' ' + lastname)\n<mask token>\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\n<mask token>\nprint(x)\nprint(bin(x))\n<mask token>\nprint(complex)\n<mask token>\nprint(q)\n<mask token>\nprint(w)\n<mask token>\nprint(e)\n<mask token>\nprint(r)\n<mask token>\nprint(t)\n<mask token>\nprint(g)\n<mask token>\nprint(m)\n<mask token>\nprint(abs(PI))\nprint(round(PI))\n<mask token>\nprint(math.floor(no))\nprint(math.ceil(no))\n<mask token>\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\n<mask token>\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-3": "<mask token>\na = 5.0\nprint(id(a))\na = 10\nprint('hello.....')\nprint(type(a))\nprint(id(a))\nb = [5, 6, 7]\nprint(id(b))\nb.append(10)\nprint(id(b))\nname = input('Enter Your Name:: ')\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\nmessage = 'Python \"Programming\"'\nprint(message)\nmessage = \"\"\"Python \nNew Line..\nProgrammin\"\"\"\nprint(message)\nlastname = input('Enter Your Last Name:: ')\nprint(lastname)\nprint(name + ' ' + lastname)\nfull = f'{name} {lastname}'\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\nx = 5\nprint(x)\nprint(bin(x))\ncomplex = a + 5.0j\nprint(complex)\ny = 3\nq = a + y\nprint(q)\nw = a - y\nprint(w)\ne = a * y\nprint(e)\nr = a / y\nprint(r)\nt = a // y\nprint(t)\ng = a ** y\nprint(g)\nm = a % y\nprint(m)\nPI = 3.14\nprint(abs(PI))\nprint(round(PI))\nno = -8.56\nprint(math.floor(no))\nprint(math.ceil(no))\nage = 10\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\nanswer = 10\nguess = 1\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-4": "import math\na = 5.0\nprint(id(a))\na = 10\nprint('hello.....')\nprint(type(a))\nprint(id(a))\nb = [5, 6, 7]\nprint(id(b))\nb.append(10)\nprint(id(b))\nname = input('Enter Your Name:: ')\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\nmessage = 'Python \"Programming\"'\nprint(message)\nmessage = \"\"\"Python \nNew Line..\nProgrammin\"\"\"\nprint(message)\nlastname = input('Enter Your Last Name:: ')\nprint(lastname)\nprint(name + ' ' + lastname)\nfull = f'{name} {lastname}'\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\nx = 5\nprint(x)\nprint(bin(x))\ncomplex = a + 5.0j\nprint(complex)\ny = 3\nq = a + y\nprint(q)\nw = a - y\nprint(w)\ne = a * y\nprint(e)\nr = a / y\nprint(r)\nt = a // y\nprint(t)\ng = a ** y\nprint(g)\nm = a % y\nprint(m)\nPI = 3.14\nprint(abs(PI))\nprint(round(PI))\nno = -8.56\nprint(math.floor(no))\nprint(math.ceil(no))\nage = 10\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\nanswer = 10\nguess = 1\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-5": "import math\r\n\r\n# type defining of the variable and playing with variables.\r\na = 5.0\r\nprint(id(a))\r\na = 10\r\nprint(\"hello.....\")\r\nprint(type(a))\r\nprint(id(a))\r\n\r\n# locating addresses...\r\nb = [5, 6, 7]\r\nprint(id(b))\r\nb.append(10)\r\nprint(id(b))\r\n\r\n# Strings...\r\n\r\nname = input(\"Enter Your Name:: \") # iNPUTTING AS NAME\r\nprint(name)\r\nprint(len(name))\r\nprint(name[2])\r\nprint(name[0:3])\r\nprint(name[-2:])\r\n\r\n# Escape Sequence\r\n# \\'\r\n# \\\"\r\n# \\\\\r\n# \\n\r\nmessage = 'Python \"Programming\"'\r\nprint(message)\r\nmessage = \"\"\"Python \r\nNew Line..\r\nProgrammin\"\"\"\r\nprint(message)\r\n# string Concatenation\r\n\r\nlastname = input(\"Enter Your Last Name:: \") # iNPUTTING AS NAME\r\nprint(lastname)\r\nprint(name + \" \" + lastname)\r\n\r\nfull = f\"{name} {lastname}\"\r\nprint(\"Another way of writing... \\n\" + full)\r\nprint(full.upper()) # converts into upper case.\r\nprint(full.find(\"ip\")) # finding location of specific char. Returns index number.\r\n\r\nprint(\"Dipesh\" in full) # returns Boolean value either true or false..\r\nprint(\"Patel\" in full)\r\nprint(full.replace(\"Rafaliya\", \"Patel\"))\r\n\r\n# Binary representation of any number...\r\nprint(bin(a)) # binary of a = 10\r\nprint(hex(a)) # Hexadecimal of a..\r\n\r\nx = 0b0101\r\nprint((x)) # binary num a\r\nprint(bin(x)) # binary printing of a\r\n\r\n# complex Number...\r\ncomplex = a + 5j\r\nprint(complex) # printing complex number\r\ny = 3\r\n# operations\r\nq = a + y # addition\r\nprint(q)\r\nw = a - y # substraction\r\nprint(w)\r\ne = a * y # multiplication\r\nprint(e)\r\nr = a / y # division\r\nprint(r)\r\nt = a // y # division but only print integer value\r\nprint(t)\r\ng = a ** y # to the power of\r\nprint(g)\r\nm = a % y # remainder\r\nprint(m)\r\n\r\n# constants variables..\r\nPI = 3.14 # this is a var with a constant value\r\nprint(abs(PI)) # absolute value of PI\r\nprint(round(PI)) # round up value of PI\r\nno = -8.56\r\nprint(math.floor(no)) # floor value of no\r\nprint(math.ceil(no)) # ceiling value of no\r\n\r\n# if-elif-else loop\r\nage = 10\r\nif age >= 21:\r\n print(\"Adult\")\r\nelif age >= 13:\r\n print(\"Teenager\")\r\nelse:\r\n print(\"Child\")\r\n\r\n# ternary operator\r\nprint(\"Adult\" if age >= 21 else \"Teenager\")\r\n\r\n# for loops\r\nfor p in \"Dipesh\":\r\n print(p)\r\n\r\nfor l in range(0, 10, 2): # range is a kind of list...\r\n print(l)\r\n\r\nanswer = 10\r\nguess = 1\r\nwhile answer != guess: # while loop for guessing\r\n guess = int(input(\"Enter your Guess:: \"))\r\nelse:\r\n pass # this is used to break the loop...\r\n\r\n# defining a function ... Number is even or odd..\r\ndef evenodd(numb):\r\n if numb % 2 == 0:\r\n return \"even\"\r\n else:\r\n return \"odd\"\r\n\r\n\r\nprint(\"The Number is \" + evenodd(20))\r\n\r\n# printing the row at a time...\r\ndef rows(**ro):\r\n print(ro)\r\n\r\n\r\nrows(name=\"Dipesh\", id=1)\r\n\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import cv2
import glob
import numpy as np
import csv
import matplotlib.pyplot as plt
from pydarknet import Detector,Image
"""
Calculates the average precision based on the precision and recall values,
which are essentially the output of getPrecisionRecall
Returns the 101pt interpolation curve and a single average precision value
"""
def getAP(prec,rec):
    #smooth
    prec0 = prec.copy()
    prec0.append(0.0)
    smoothprec = np.zeros(101) #smoothed and ready for easy 101pt interpolation
    for idx in range(101):
        i = (100-idx)/100.
        val = 0
        for re_idx in range(len(rec)): #go through recs
            re_i = len(rec)-re_idx-1 #from back to front
            if rec[re_i] >= i: # if value there is larger than i
                val = max(prec0[re_i:])
                #break
        smoothprec[100-idx] = val
    #quick 101 pt interpolation
    ap = np.mean(smoothprec)
    return(smoothprec,ap)
"""
Calculates the intersection of two boxes a and b,
both arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are
the upmost left and downmost right corner
Returns a single value for the Intersection amount in pixels
"""
def getIntersection(a,b): #each in format x1,y1,x2,y2
    intersection = [0,0,0,0]
    #left ->
    if b[0] <= a[0] and a[0] <= b[2]:
        intersection[0] = a[0]
    elif a[0] <= b[0] and b[0] <= a[2]:
        intersection[0] = b[0]
    else:
        return 0
    #down ->
    if b[1] <= a[1] and a[1] <= b[3]:
        intersection[1] = a[1]
    elif a[1] <= b[1] and b[1] <= a[3]:
        intersection[1] = b[1]
    else:
        return 0
    #right ->
    if b[0] <= a[2] and a[2] <= b[2]:
        intersection[2] = a[2]
    elif a[0] <= b[2] and b[2] <= a[2]:
        intersection[2] = b[2]
    else:
        return 0
    #up ->
    if b[0] <= a[3] and a[3] <= b[3]: #up
        intersection[3] = a[3]
    elif a[0] <= b[3] and b[3] <= a[3]:
        intersection[3] = b[3]
    else:
        return 0
    i1 = intersection[3]-intersection[1]
    i2 = intersection[2]-intersection[0]
    i = i1*i2
    return i
"""
Calculates the IoU Intersection over Union for the two boxes a and b,
both arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are
the upmost left and downmost right corner
Returns a single IoU value
"""
def getIoU(a,b): #format of a and b is x1,y1,x2,y2
    a = np.array(a, np.float32)
    b = np.array(b, np.float32)
    intersection = getIntersection(a,b)
    asize = (a[2]-a[0])*(a[3]-a[1])
    bsize = (b[2]-b[0])*(b[3]-b[1])
    if intersection > 0:#
        union = asize + bsize - intersection
    else:
        union = asize + bsize
    return(intersection/union)
"""
Calculates the precision and recall values/curve given plist that contains only "TP" and "FP" items
this list was created by predictions that are ordered based on score
and positives, the number of all positives based on the ground truth
Returns tuple of lists for precisions and recalls
"""
def getPrecisionRecall(plist,positives):
    tp = 0
    fp = 0
    precs = []
    recs = []
    for e in plist:
        if e == "TP":
            tp += 1
        elif e == "FP":
            fp += 1
        precision = tp/(tp+fp)
        precs.append(precision)
        recall = tp/(positives)
        recs.append(recall)
    return(precs,recs)
def readResults(filename):
    file = []
    with open(filename) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            file.append(row)
    return file
"""
converts relative to absolute coordinates,
x = point of box (relative), y = point of box (relative)
w = width of box (relative), h = height of box (relative)
o_x = original width of image, o_y = original height of image
"""
def relativeToAbsolute(x,y,w,h,o_x,o_y):
    n_x = float(x)*float(o_x)
    n_y = float(y)*float(o_y)
    n_w = float(w)*float(o_x)
    n_h = float(h)*float(o_y)
    return(n_x,n_y,n_w,n_h)
|
normal
|
{
"blob_id": "f8a31cdf5f55b5aed33a407d2c008ba9b969d655",
"index": 9493,
"step-1": "<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-2": "<mask token>\n\n\ndef getAP(prec, rec):\n prec0 = prec.copy()\n prec0.append(0.0)\n smoothprec = np.zeros(101)\n for idx in range(101):\n i = (100 - idx) / 100.0\n val = 0\n for re_idx in range(len(rec)):\n re_i = len(rec) - re_idx - 1\n if rec[re_i] >= i:\n val = max(prec0[re_i:])\n smoothprec[100 - idx] = val\n ap = np.mean(smoothprec)\n return smoothprec, ap\n\n\n<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef getPrecisionRecall(plist, positives):\n tp = 0\n fp = 0\n precs = []\n recs = []\n for e in plist:\n if e == 'TP':\n tp += 1\n elif e == 'FP':\n fp += 1\n precision = tp / (tp + fp)\n precs.append(precision)\n recall = tp / positives\n recs.append(recall)\n return precs, recs\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-3": "<mask token>\n\n\ndef getAP(prec, rec):\n prec0 = prec.copy()\n prec0.append(0.0)\n smoothprec = np.zeros(101)\n for idx in range(101):\n i = (100 - idx) / 100.0\n val = 0\n for re_idx in range(len(rec)):\n re_i = len(rec) - re_idx - 1\n if rec[re_i] >= i:\n val = max(prec0[re_i:])\n smoothprec[100 - idx] = val\n ap = np.mean(smoothprec)\n return smoothprec, ap\n\n\n<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef getIoU(a, b):\n a = np.array(a, np.float32)\n b = np.array(b, np.float32)\n intersection = getIntersection(a, b)\n asize = (a[2] - a[0]) * (a[3] - a[1])\n bsize = (b[2] - b[0]) * (b[3] - b[1])\n if intersection > 0:\n union = asize + bsize - intersection\n else:\n union = asize + bsize\n return intersection / union\n\n\n<mask token>\n\n\ndef getPrecisionRecall(plist, positives):\n tp = 0\n fp = 0\n precs = []\n recs = []\n for e in plist:\n if e == 'TP':\n tp += 1\n elif e == 'FP':\n fp += 1\n precision = tp / (tp + fp)\n precs.append(precision)\n recall = tp / positives\n recs.append(recall)\n return precs, recs\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-4": "import cv2\nimport glob\nimport numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nfrom pydarknet import Detector, Image\n<mask token>\n\n\ndef getAP(prec, rec):\n prec0 = prec.copy()\n prec0.append(0.0)\n smoothprec = np.zeros(101)\n for idx in range(101):\n i = (100 - idx) / 100.0\n val = 0\n for re_idx in range(len(rec)):\n re_i = len(rec) - re_idx - 1\n if rec[re_i] >= i:\n val = max(prec0[re_i:])\n smoothprec[100 - idx] = val\n ap = np.mean(smoothprec)\n return smoothprec, ap\n\n\n<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef getIoU(a, b):\n a = np.array(a, np.float32)\n b = np.array(b, np.float32)\n intersection = getIntersection(a, b)\n asize = (a[2] - a[0]) * (a[3] - a[1])\n bsize = (b[2] - b[0]) * (b[3] - b[1])\n if intersection > 0:\n union = asize + bsize - intersection\n else:\n union = asize + bsize\n return intersection / union\n\n\n<mask token>\n\n\ndef getPrecisionRecall(plist, positives):\n tp = 0\n fp = 0\n precs = []\n recs = []\n for e in plist:\n if e == 'TP':\n tp += 1\n elif e == 'FP':\n fp += 1\n precision = tp / (tp + fp)\n precs.append(precision)\n recall = tp / positives\n recs.append(recall)\n return precs, recs\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-5": "import cv2\r\nimport glob\r\nimport numpy as np\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom pydarknet import Detector,Image\r\n\r\n\"\"\"\r\nCalculates the average precision based on the precision and recall values,\r\nwhich are essentially the output of getPrecisionRecall\r\nReturns the 101pt interpolation curve and a single average precision value\r\n\"\"\"\r\ndef getAP(prec,rec):\r\n #smooth\r\n prec0 = prec.copy()\r\n prec0.append(0.0)\r\n smoothprec = np.zeros(101) #smoothed and ready for easy 101pt interpolation\r\n for idx in range(101):\r\n i = (100-idx)/100.\r\n val = 0\r\n for re_idx in range(len(rec)): #go through recs\r\n re_i = len(rec)-re_idx-1 #from back to front\r\n if rec[re_i] >= i: # if value there is larger than i\r\n val = max(prec0[re_i:])\r\n #break\r\n smoothprec[100-idx] = val\r\n #quick 101 pt interpolation\r\n ap = np.mean(smoothprec)\r\n return(smoothprec,ap)\r\n\r\n\"\"\"\r\nCalculates the intersection of two boxes a and b,\r\nboth arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are \r\nthe upmost left and downmost right corner\r\nReturns a single value for the Intersection amount in pixels\r\n\"\"\"\r\ndef getIntersection(a,b): #each in format x1,y1,x2,y2\r\n intersection = [0,0,0,0]\r\n #left -> \r\n if b[0] <= a[0] and a[0] <= b[2]:\r\n intersection[0] = a[0]\r\n elif a[0] <= b[0] and b[0] <= a[2]:\r\n intersection[0] = b[0]\r\n else: \r\n return 0\r\n #down ->\r\n if b[1] <= a[1] and a[1] <= b[3]:\r\n intersection[1] = a[1]\r\n elif a[1] <= b[1] and b[1] <= a[3]:\r\n intersection[1] = b[1]\r\n else:\r\n return 0\r\n #right ->\r\n if b[0] <= a[2] and a[2] <= b[2]: \r\n intersection[2] = a[2]\r\n elif a[0] <= b[2] and b[2] <= a[2]:\r\n intersection[2] = b[2]\r\n else:\r\n return 0\r\n #up ->\r\n if b[0] <= a[3] and a[3] <= b[3]: #up\r\n intersection[3] = a[3]\r\n elif a[0] <= b[3] and b[3] <= a[3]:\r\n intersection[3] = b[3] \r\n else:\r\n return 0\r\n i1 = intersection[3]-intersection[1]\r\n i2 = intersection[2]-intersection[0]\r\n i = i1*i2 \r\n return i\r\n\r\n\"\"\"\r\nCalculates the IoU Intersection over Union for the two boxes a and b,\r\nboth arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are \r\nthe upmost left and downmost right corner\r\nReturns a single IoU value\r\n\"\"\"\r\ndef getIoU(a,b): #format of a and b is x1,y1,x2,y2\r\n a = np.array(a, np.float32)\r\n b = np.array(b, np.float32)\r\n intersection = getIntersection(a,b)\r\n asize = (a[2]-a[0])*(a[3]-a[1])\r\n bsize = (b[2]-b[0])*(b[3]-b[1])\r\n if intersection > 0:#\r\n union = asize + bsize - intersection\r\n else:\r\n union = asize + bsize\r\n return(intersection/union)\r\n\r\n\"\"\"\r\nCalculates the precision and recall values/curve given plist that contains only \"TP\" and \"FP\" items\r\nthis list was created by predictions that are ordered based on score\r\nand positives, the number of all positives based on the ground truth\r\nReturns tuple of lists for precisions and recalls\r\n\"\"\"\r\ndef getPrecisionRecall(plist,positives):\r\n tp = 0\r\n fp = 0\r\n precs = []\r\n recs = []\r\n for e in plist:\r\n if e == \"TP\":\r\n tp += 1\r\n elif e == \"FP\":\r\n fp += 1\r\n precision = tp/(tp+fp)\r\n precs.append(precision)\r\n recall = tp/(positives)\r\n recs.append(recall)\r\n return(precs,recs)\r\n\r\ndef readResults(filename):\r\n\tfile = []\r\n\twith open(filename) as csvfile:\r\n\t reader = csv.reader(csvfile, delimiter=',')\r\n\t for row in reader:\r\n\t \tfile.append(row)\r\n\treturn file\r\n\r\n\"\"\"\r\nconverts relative to 
absolute coordinates,\r\nx = point of box (relative), y = point of box (relative)\r\nw = width of box (relative), h = height of box (relative)\r\no_x = original width of image, o_y = original height of image\r\n\"\"\"\r\ndef relativeToAbsolute(x,y,w,h,o_x,o_y):\r\n n_x = float(x)*float(o_x)\r\n n_y = float(y)*float(o_y)\r\n n_w = float(w)*float(o_x)\r\n n_h = float(h)*float(o_y)\r\n return(n_x,n_y,n_w,n_h)\r\n\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import datetime
now = datetime.datetime.now()
print(now.year, now.month, now.day, now.hour, now.minute, now.second)
|
normal
|
{
"blob_id": "3af91de0b25f575ec9d981d7711c710a7e9695e4",
"index": 6819,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(now.year, now.month, now.day, now.hour, now.minute, now.second)\n",
"step-3": "<mask token>\nnow = datetime.datetime.now()\nprint(now.year, now.month, now.day, now.hour, now.minute, now.second)\n",
"step-4": "import datetime\nnow = datetime.datetime.now()\nprint(now.year, now.month, now.day, now.hour, now.minute, now.second)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#Creating function
def name_of_function():
'''
Docstring explains function.
'''
return "Hello" #use return instead of print since return can be stored as a variable.
#Simple example
def dog_check(mystring):
if 'dog' in mystring.lower():
return True
else:
return False
#This is a beginner move. x in y.lower() is already a boolean.
dog_check('Dog ran away')
#Expert move:
def dog_check(mystring):
return 'dog' in mystring.lower()
# *args
def myfunc(*args): #instead of myfunc(a,b,c,...) no limit of arguments and it will be treated as tuples.
return sum(args) * 0.05
myfunc(14,10,100)
# **kwargs # kwargs returns as a dictionary
def myfunc(**kwargs):
if 'fruit' in kwargs:
print('My fruit of choice is {}'.format(kwargs['fruit']))
else:
print('I did not find any fruit here')
myfunc(fruit='apple')
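#prints: My fruit of choice is apple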
#Combination
def myfunc(*args, **kwargs):
print('I would like {} {}'.format(args[0], kwargs['food']))
myfunc(10,20,30,fruit='orange',food='eggs',animal='dog')
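#here args is (10, 20, 30) and kwargs holds the keyword values, so this prints: I would like 10 eggs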
##BONUS Project
#Define a function called myfunc that takes in a string, and returns a matching string where every even letter is uppercase, n/
#and every odd letter is lowercase.
def myfunc(word):
result = ""
for index, letter in enumerate(word):
if index % 2 == 0:
result += letter.lower()
else:
result += letter.upper()
return result
myfunc('VictoriaSok')
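#expected result: 'vIcToRiAsOk' (index 0 counts as even, so the first letter ends up lowercase)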
|
normal
|
{
"blob_id": "1deb070dd91c01190b70fa678add31ecb82f34fa",
"index": 3404,
"step-1": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\n<mask token>\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\n<mask token>\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\n<mask token>\n",
"step-2": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\n<mask token>\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\n<mask token>\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\n<mask token>\n\n\ndef myfunc(word):\n result = ''\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n\n\n<mask token>\n",
"step-3": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\ndef myfunc(*args):\n return sum(args) * 0.05\n\n\n<mask token>\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\n<mask token>\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\n<mask token>\n\n\ndef myfunc(word):\n result = ''\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n\n\n<mask token>\n",
"step-4": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\ndog_check('Dog ran away')\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\ndef myfunc(*args):\n return sum(args) * 0.05\n\n\nmyfunc(14, 10, 100)\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\nmyfunc(fruit='apple')\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\nmyfunc(10, 20, 30, fruit='orange', food='eggs', animal='dog')\n\n\ndef myfunc(word):\n result = ''\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n\n\nmyfunc('VictoriaSok')\n",
"step-5": "#Creating function\n\ndef name_of_function():\n '''\n Docstring explains function.\n '''\n return \"Hello\" #use return instead of print since return can be stored as a variable.\n \n \n#Simple example\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n#This is a beginner move. x in y.lower() is already a boolean.\n\ndog_check('Dog ran away')\n\n#Expert move:\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\n# *args\ndef myfunc(*args): #instead of myfunc(a,b,c,...) no limit of arguments and it will be treated as tuples.\n return sum(args) * 0.05\n \nmyfunc(14,10,100)\n\n\n# **kwargs # kwargs returns as a dictionary\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n \nmyfunc(fruit='apple')\n\n\n#Combination\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n \nmyfunc(10,20,30,fruit='orange',food='eggs',animal='dog')\n\n\n\n##BONUS Project\n#Define a function called myfunc that takes in a string, and returns a matching string where every even letter is uppercase, n/\n#and every odd letter is lowercase.\n\ndef myfunc(word):\n\n result = \"\"\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n \nmyfunc('VictoriaSok')\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import argparse
import sys
import subprocess
import getpass
# Process arguments
parser = argparse.ArgumentParser(description='Setup a new apache virtual host on an Ubuntu system. Only tested on versions 18.04 and 20.04')
parser.add_argument('domain_name', metavar='D', type=str, nargs='+', help='domain name to give to virtual host. multiple domains can be specified at once')
args = parser.parse_args()
# Confirm action with user
print("The following virtual host(s) will be created under their respective names.")
fa_flag = False
for arg in vars(args):
print(getattr(args, arg))
# List of port numbers
port_list = []
# Ask for ports for the each domain
print("Note: port defaults to 80")
for vh in sys.argv:
if vh == 'create_apache_vhost.py':
continue
port = input("Which port should be used for " + vh + "?: ")
if port:
port_list.append(port)
else:
port_list.append("80")
while True:
ans = input("Proceed? [Y/n] ")
if ans == 'n' or ans == 'N':
print("Exiting")
quit()
elif ans == 'Y' or ans == 'y':
print("Proceeding")
break
else:
print("Invald input")
# Install apache2 if not yet installed
install_sts = subprocess.call(['test', '-e', '/etc/apache2'])
if install_sts != 0:
print("Installing Apache")
subprocess.call(['sudo', 'apt', 'install', 'apache2'])
subprocess.call(['ufw', 'allow', "'Apache'"])
# Get username
username = getpass.getuser()
# Iterate though each virtual host to be created
index = 0
for vh in sys.argv:
if vh == 'create_apache_vhost.py':
continue
print("Creating virtual host: " + vh)
src_path = '/var/www/html/' + vh
subprocess.call(['sudo', 'mkdir', src_path])
subprocess.call(['sudo', 'chown', '-R', username + ':' + username, src_path])
subprocess.call(['sudo', 'chmod', '755', src_path])
subprocess.call(['sudo', 'touch', src_path + 'index.html'])
with open(src_path + '/index.html', 'a') as out:
out.write("""<html>
<head>
<title>Welcome to """ + vh + """</title>
</head>
<body>
<h1>""" + vh + """ virtual host is working!</h1>
</body>
</html>""")
conf_path = '/etc/apache2/sites-available/' + vh + '.conf'
subprocess.call(['sudo', 'touch', conf_path])
with open(conf_path, 'w') as out:
out.write("""<VirtualHost *:""" + port_list[index] + """>
ServerAdmin webmaster@localhost
ServerName """ + vh + """
ServerAlias www.""" + vh + """.com
DocumentRoot /var/www/html/""" + vh + """
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>""")
subprocess.call(['sudo', 'a2ensite', vh])
print("\n [" + vh + "] virtual host was successfully created!")
print(" - Source is located at " + src_path)
print(" - Config file is located at " + conf_path + "\n")
index += 1
subprocess.call(['systemctl', 'restart', 'apache2'])
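# Rough usage sketch, assuming the file is saved under the exact name the
# script compares against in sys.argv above:
#
#   python3 create_apache_vhost.py example.com blog.example.com
#
# For each domain it prompts for a port (default 80), asks for confirmation,
# installs Apache if /etc/apache2 is missing, then writes a document root and
# a sites-available config per domain before restarting apache2.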
|
normal
|
{
"blob_id": "a8e67ddbb741af6a9ff7540fef8c21468321ede0",
"index": 7996,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('domain_name', metavar='D', type=str, nargs='+', help=\n 'domain name to give to virtual host. multiple domains can be specified at once'\n )\n<mask token>\nprint(\n 'The following virtual host(s) will be created under their respective names.'\n )\n<mask token>\nfor arg in vars(args):\n print(getattr(args, arg))\n<mask token>\nprint('Note: port defaults to 80')\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n port = input('Which port should be used for ' + vh + '?: ')\n if port:\n port_list.append(port)\n else:\n port_list.append('80')\nwhile True:\n ans = input('Proceed? [Y/n] ')\n if ans == 'n' or ans == 'N':\n print('Exiting')\n quit()\n elif ans == 'Y' or ans == 'y':\n print('Proceeding')\n break\n else:\n print('Invald input')\n<mask token>\nif install_sts != 0:\n print('Installing Apache')\n subprocess.call(['sudo', 'apt', 'install', 'apache2'])\n subprocess.call(['ufw', 'allow', \"'Apache'\"])\n<mask token>\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n print('Creating virtual host: ' + vh)\n src_path = '/var/www/html/' + vh\n subprocess.call(['sudo', 'mkdir', src_path])\n subprocess.call(['sudo', 'chown', '-R', username + ':' + username,\n src_path])\n subprocess.call(['sudo', 'chmod', '755', src_path])\n subprocess.call(['sudo', 'touch', src_path + 'index.html'])\n with open(src_path + '/index.html', 'a') as out:\n out.write('<html>\\n <head>\\n <title>Welcome to ' + vh +\n \"\"\"</title>\n </head>\n <body>\n <h1>\"\"\" + vh +\n ' virtual host is working!</h1>\\n </body>\\n</html>')\n conf_path = '/etc/apache2/sites-available/' + vh + '.conf'\n subprocess.call(['sudo', 'touch', conf_path])\n with open(conf_path, 'w') as out:\n out.write('<VirtualHost *:' + port_list[index] +\n \"\"\">\n ServerAdmin webmaster@localhost\n ServerName \"\"\" +\n vh + \"\"\"\n ServerAlias www.\"\"\" + vh +\n \"\"\".com\n DocumentRoot /var/www/html/\"\"\" + vh +\n \"\"\"\n ErrorLog ${APACHE_LOG_DIR}/error.log\n CustomLog ${APACHE_LOG_DIR}/access.log combined\n</VirtualHost>\"\"\"\n )\n subprocess.call(['sudo', 'a2ensite', vh])\n print('\\n [' + vh + '] virtual host was successfully created!')\n print(' - Source is located at ' + src_path)\n print(' - Config file is located at ' + conf_path + '\\n')\n index += 1\nsubprocess.call(['systemctl', 'restart', 'apache2'])\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser(description=\n 'Setup a new apache virtual host on an Ubuntu system. Only tested on versions 18.04 and 20.04'\n )\nparser.add_argument('domain_name', metavar='D', type=str, nargs='+', help=\n 'domain name to give to virtual host. multiple domains can be specified at once'\n )\nargs = parser.parse_args()\nprint(\n 'The following virtual host(s) will be created under their respective names.'\n )\nfa_flag = False\nfor arg in vars(args):\n print(getattr(args, arg))\nport_list = []\nprint('Note: port defaults to 80')\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n port = input('Which port should be used for ' + vh + '?: ')\n if port:\n port_list.append(port)\n else:\n port_list.append('80')\nwhile True:\n ans = input('Proceed? [Y/n] ')\n if ans == 'n' or ans == 'N':\n print('Exiting')\n quit()\n elif ans == 'Y' or ans == 'y':\n print('Proceeding')\n break\n else:\n print('Invald input')\ninstall_sts = subprocess.call(['test', '-e', '/etc/apache2'])\nif install_sts != 0:\n print('Installing Apache')\n subprocess.call(['sudo', 'apt', 'install', 'apache2'])\n subprocess.call(['ufw', 'allow', \"'Apache'\"])\nusername = getpass.getuser()\nindex = 0\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n print('Creating virtual host: ' + vh)\n src_path = '/var/www/html/' + vh\n subprocess.call(['sudo', 'mkdir', src_path])\n subprocess.call(['sudo', 'chown', '-R', username + ':' + username,\n src_path])\n subprocess.call(['sudo', 'chmod', '755', src_path])\n subprocess.call(['sudo', 'touch', src_path + 'index.html'])\n with open(src_path + '/index.html', 'a') as out:\n out.write('<html>\\n <head>\\n <title>Welcome to ' + vh +\n \"\"\"</title>\n </head>\n <body>\n <h1>\"\"\" + vh +\n ' virtual host is working!</h1>\\n </body>\\n</html>')\n conf_path = '/etc/apache2/sites-available/' + vh + '.conf'\n subprocess.call(['sudo', 'touch', conf_path])\n with open(conf_path, 'w') as out:\n out.write('<VirtualHost *:' + port_list[index] +\n \"\"\">\n ServerAdmin webmaster@localhost\n ServerName \"\"\" +\n vh + \"\"\"\n ServerAlias www.\"\"\" + vh +\n \"\"\".com\n DocumentRoot /var/www/html/\"\"\" + vh +\n \"\"\"\n ErrorLog ${APACHE_LOG_DIR}/error.log\n CustomLog ${APACHE_LOG_DIR}/access.log combined\n</VirtualHost>\"\"\"\n )\n subprocess.call(['sudo', 'a2ensite', vh])\n print('\\n [' + vh + '] virtual host was successfully created!')\n print(' - Source is located at ' + src_path)\n print(' - Config file is located at ' + conf_path + '\\n')\n index += 1\nsubprocess.call(['systemctl', 'restart', 'apache2'])\n",
"step-4": "import argparse\nimport sys\nimport subprocess\nimport getpass\nparser = argparse.ArgumentParser(description=\n 'Setup a new apache virtual host on an Ubuntu system. Only tested on versions 18.04 and 20.04'\n )\nparser.add_argument('domain_name', metavar='D', type=str, nargs='+', help=\n 'domain name to give to virtual host. multiple domains can be specified at once'\n )\nargs = parser.parse_args()\nprint(\n 'The following virtual host(s) will be created under their respective names.'\n )\nfa_flag = False\nfor arg in vars(args):\n print(getattr(args, arg))\nport_list = []\nprint('Note: port defaults to 80')\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n port = input('Which port should be used for ' + vh + '?: ')\n if port:\n port_list.append(port)\n else:\n port_list.append('80')\nwhile True:\n ans = input('Proceed? [Y/n] ')\n if ans == 'n' or ans == 'N':\n print('Exiting')\n quit()\n elif ans == 'Y' or ans == 'y':\n print('Proceeding')\n break\n else:\n print('Invald input')\ninstall_sts = subprocess.call(['test', '-e', '/etc/apache2'])\nif install_sts != 0:\n print('Installing Apache')\n subprocess.call(['sudo', 'apt', 'install', 'apache2'])\n subprocess.call(['ufw', 'allow', \"'Apache'\"])\nusername = getpass.getuser()\nindex = 0\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n print('Creating virtual host: ' + vh)\n src_path = '/var/www/html/' + vh\n subprocess.call(['sudo', 'mkdir', src_path])\n subprocess.call(['sudo', 'chown', '-R', username + ':' + username,\n src_path])\n subprocess.call(['sudo', 'chmod', '755', src_path])\n subprocess.call(['sudo', 'touch', src_path + 'index.html'])\n with open(src_path + '/index.html', 'a') as out:\n out.write('<html>\\n <head>\\n <title>Welcome to ' + vh +\n \"\"\"</title>\n </head>\n <body>\n <h1>\"\"\" + vh +\n ' virtual host is working!</h1>\\n </body>\\n</html>')\n conf_path = '/etc/apache2/sites-available/' + vh + '.conf'\n subprocess.call(['sudo', 'touch', conf_path])\n with open(conf_path, 'w') as out:\n out.write('<VirtualHost *:' + port_list[index] +\n \"\"\">\n ServerAdmin webmaster@localhost\n ServerName \"\"\" +\n vh + \"\"\"\n ServerAlias www.\"\"\" + vh +\n \"\"\".com\n DocumentRoot /var/www/html/\"\"\" + vh +\n \"\"\"\n ErrorLog ${APACHE_LOG_DIR}/error.log\n CustomLog ${APACHE_LOG_DIR}/access.log combined\n</VirtualHost>\"\"\"\n )\n subprocess.call(['sudo', 'a2ensite', vh])\n print('\\n [' + vh + '] virtual host was successfully created!')\n print(' - Source is located at ' + src_path)\n print(' - Config file is located at ' + conf_path + '\\n')\n index += 1\nsubprocess.call(['systemctl', 'restart', 'apache2'])\n",
"step-5": "import argparse\nimport sys\nimport subprocess\nimport getpass\n\n# Process arguments\nparser = argparse.ArgumentParser(description='Setup a new apache virtual host on an Ubuntu system. Only tested on versions 18.04 and 20.04')\nparser.add_argument('domain_name', metavar='D', type=str, nargs='+', help='domain name to give to virtual host. multiple domains can be specified at once')\nargs = parser.parse_args()\n\n# Confirm action with user\nprint(\"The following virtual host(s) will be created under their respective names.\")\nfa_flag = False\nfor arg in vars(args):\n print(getattr(args, arg))\n\n# List of port numbers\nport_list = []\n\n# Ask for ports for the each domain\nprint(\"Note: port defaults to 80\")\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n\n port = input(\"Which port should be used for \" + vh + \"?: \")\n if port:\n port_list.append(port)\n else:\n port_list.append(\"80\")\n\nwhile True:\n ans = input(\"Proceed? [Y/n] \")\n if ans == 'n' or ans == 'N':\n print(\"Exiting\")\n quit()\n elif ans == 'Y' or ans == 'y':\n print(\"Proceeding\")\n break\n else:\n print(\"Invald input\")\n\n# Install apache2 if not yet installed\ninstall_sts = subprocess.call(['test', '-e', '/etc/apache2'])\nif install_sts != 0:\n print(\"Installing Apache\")\n subprocess.call(['sudo', 'apt', 'install', 'apache2'])\n subprocess.call(['ufw', 'allow', \"'Apache'\"])\n\n# Get username\nusername = getpass.getuser()\n\n# Iterate though each virtual host to be created\nindex = 0\nfor vh in sys.argv:\n if vh == 'create_apache_vhost.py':\n continue\n \n print(\"Creating virtual host: \" + vh)\n\n src_path = '/var/www/html/' + vh\n subprocess.call(['sudo', 'mkdir', src_path])\n subprocess.call(['sudo', 'chown', '-R', username + ':' + username, src_path])\n subprocess.call(['sudo', 'chmod', '755', src_path])\n subprocess.call(['sudo', 'touch', src_path + 'index.html'])\n\n with open(src_path + '/index.html', 'a') as out:\n out.write(\"\"\"<html>\n <head>\n <title>Welcome to \"\"\" + vh + \"\"\"</title>\n </head>\n <body>\n <h1>\"\"\" + vh + \"\"\" virtual host is working!</h1>\n </body>\n</html>\"\"\")\n \n conf_path = '/etc/apache2/sites-available/' + vh + '.conf'\n subprocess.call(['sudo', 'touch', conf_path])\n\n with open(conf_path, 'w') as out:\n out.write(\"\"\"<VirtualHost *:\"\"\" + port_list[index] + \"\"\">\n ServerAdmin webmaster@localhost\n ServerName \"\"\" + vh + \"\"\"\n ServerAlias www.\"\"\" + vh + \"\"\".com\n DocumentRoot /var/www/html/\"\"\" + vh + \"\"\"\n ErrorLog ${APACHE_LOG_DIR}/error.log\n CustomLog ${APACHE_LOG_DIR}/access.log combined\n</VirtualHost>\"\"\")\n\n subprocess.call(['sudo', 'a2ensite', vh])\n\n print(\"\\n [\" + vh + \"] virtual host was successfully created!\")\n print(\" - Source is located at \" + src_path)\n print(\" - Config file is located at \" + conf_path + \"\\n\")\n\n index += 1\n\nsubprocess.call(['systemctl', 'restart', 'apache2'])\n\n \n\n\n \n\n\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy
from scipy.optimize import OptimizeResult
from logging import getLogger
logger = getLogger(__name__)
def minimize_neldermead(func, x0, args=(), callback=None,
maxiter=None, maxfev=None, disp=False,
return_all=False, initial_simplex=None,
xatol=1e-4, fatol=1e-4, **unknown_options):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*200``, where ``N`` is the number of
variables, if neither `maxiter` or `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization will stop at the
first reached.
initial_simplex : array_like of shape (N + 1, N)
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the j-th vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
xatol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
fatol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
"""
maxfun = maxfev
retall = return_all
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
nonzdelt = 0.05
zdelt = 0.00025
if initial_simplex is None:
N = len(x0)
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
sim[0] = x0
for k in range(N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt) * y[k]
else:
y[k] = zdelt
sim[k + 1] = y
maxiter = 10
maxfun = 10
one2np1 = list(range(1, N + 1))
fsim = numpy.zeros((N + 1,), float)
for k in range(N + 1):
fsim[k] = func(sim[k])
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
raise Exception()
print('aaaaffaaaaaa')
iterations = 1
while iterations < maxiter:
if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and
numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):
break
logger.debug('itr: %s' % iterations)
print('aaaaaaaaaa')
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
x = sim[0]
fval = numpy.min(fsim)
warnflag = 0
result = OptimizeResult(fun=fval, nit=iterations, nfev=0,
status=warnflag, success=(warnflag == 0),
message=None, x=x, final_simplex=(sim, fsim))
return result
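# A minimal usage sketch for the function above. As written it hits the
# hard-coded `raise Exception()` right after sorting the initial simplex (and
# overrides maxiter/maxfun with 10), so the main loop is only reached once that
# debug code is removed:
#
#   import numpy
#   res = minimize_neldermead(lambda v: numpy.sum((v - 3.0) ** 2),
#                             numpy.array([0.0, 0.0]))
#   print(res.x, res.fun)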
|
normal
|
{
"blob_id": "35921b081e8e8c4da2b16afc20b27b636e9a6676",
"index": 4761,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef minimize_neldermead(func, x0, args=(), callback=None, maxiter=None,\n maxfev=None, disp=False, return_all=False, initial_simplex=None, xatol=\n 0.0001, fatol=0.0001, **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n Nelder-Mead algorithm.\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n maxiter, maxfev : int\n Maximum allowed number of iterations and function evaluations.\n Will default to ``N*200``, where ``N`` is the number of\n variables, if neither `maxiter` or `maxfev` is set. If both\n `maxiter` and `maxfev` are set, minimization will stop at the\n first reached.\n initial_simplex : array_like of shape (N + 1, N)\n Initial simplex. If given, overrides `x0`.\n ``initial_simplex[j,:]`` should contain the coordinates of\n the j-th vertex of the ``N+1`` vertices in the simplex, where\n ``N`` is the dimension.\n xatol : float, optional\n Absolute error in xopt between iterations that is acceptable for\n convergence.\n fatol : number, optional\n Absolute error in func(xopt) between iterations that is acceptable for\n convergence.\n \"\"\"\n maxfun = maxfev\n retall = return_all\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n if initial_simplex is None:\n N = len(x0)\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n maxiter = 10\n maxfun = 10\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n sim = numpy.take(sim, ind, 0)\n raise Exception()\n print('aaaaffaaaaaa')\n iterations = 1\n while iterations < maxiter:\n if numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))\n ) <= xatol and numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol:\n break\n logger.debug('itr: %s' % iterations)\n print('aaaaaaaaaa')\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n elif fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else:\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n result = OptimizeResult(fun=fval, nit=iterations, nfev=0, status=\n warnflag, success=warnflag == 0, message=None, x=x, final_simplex=(\n sim, fsim))\n return result\n",
"step-3": "<mask token>\nlogger = getLogger(__name__)\n\n\ndef minimize_neldermead(func, x0, args=(), callback=None, maxiter=None,\n maxfev=None, disp=False, return_all=False, initial_simplex=None, xatol=\n 0.0001, fatol=0.0001, **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n Nelder-Mead algorithm.\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n maxiter, maxfev : int\n Maximum allowed number of iterations and function evaluations.\n Will default to ``N*200``, where ``N`` is the number of\n variables, if neither `maxiter` or `maxfev` is set. If both\n `maxiter` and `maxfev` are set, minimization will stop at the\n first reached.\n initial_simplex : array_like of shape (N + 1, N)\n Initial simplex. If given, overrides `x0`.\n ``initial_simplex[j,:]`` should contain the coordinates of\n the j-th vertex of the ``N+1`` vertices in the simplex, where\n ``N`` is the dimension.\n xatol : float, optional\n Absolute error in xopt between iterations that is acceptable for\n convergence.\n fatol : number, optional\n Absolute error in func(xopt) between iterations that is acceptable for\n convergence.\n \"\"\"\n maxfun = maxfev\n retall = return_all\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n if initial_simplex is None:\n N = len(x0)\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n maxiter = 10\n maxfun = 10\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n sim = numpy.take(sim, ind, 0)\n raise Exception()\n print('aaaaffaaaaaa')\n iterations = 1\n while iterations < maxiter:\n if numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))\n ) <= xatol and numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol:\n break\n logger.debug('itr: %s' % iterations)\n print('aaaaaaaaaa')\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n elif fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else:\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n result = OptimizeResult(fun=fval, nit=iterations, nfev=0, status=\n warnflag, success=warnflag == 0, message=None, x=x, final_simplex=(\n sim, fsim))\n return result\n",
"step-4": "import numpy\nfrom scipy.optimize import OptimizeResult\nfrom logging import getLogger\nlogger = getLogger(__name__)\n\n\ndef minimize_neldermead(func, x0, args=(), callback=None, maxiter=None,\n maxfev=None, disp=False, return_all=False, initial_simplex=None, xatol=\n 0.0001, fatol=0.0001, **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n Nelder-Mead algorithm.\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n maxiter, maxfev : int\n Maximum allowed number of iterations and function evaluations.\n Will default to ``N*200``, where ``N`` is the number of\n variables, if neither `maxiter` or `maxfev` is set. If both\n `maxiter` and `maxfev` are set, minimization will stop at the\n first reached.\n initial_simplex : array_like of shape (N + 1, N)\n Initial simplex. If given, overrides `x0`.\n ``initial_simplex[j,:]`` should contain the coordinates of\n the j-th vertex of the ``N+1`` vertices in the simplex, where\n ``N`` is the dimension.\n xatol : float, optional\n Absolute error in xopt between iterations that is acceptable for\n convergence.\n fatol : number, optional\n Absolute error in func(xopt) between iterations that is acceptable for\n convergence.\n \"\"\"\n maxfun = maxfev\n retall = return_all\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n if initial_simplex is None:\n N = len(x0)\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n maxiter = 10\n maxfun = 10\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n sim = numpy.take(sim, ind, 0)\n raise Exception()\n print('aaaaffaaaaaa')\n iterations = 1\n while iterations < maxiter:\n if numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))\n ) <= xatol and numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol:\n break\n logger.debug('itr: %s' % iterations)\n print('aaaaaaaaaa')\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n elif fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else:\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n result = OptimizeResult(fun=fval, nit=iterations, nfev=0, status=\n warnflag, success=warnflag == 0, message=None, x=x, final_simplex=(\n sim, fsim))\n return result\n",
"step-5": "import numpy\nfrom scipy.optimize import OptimizeResult\n\nfrom logging import getLogger\n\nlogger = getLogger(__name__)\n\n\ndef minimize_neldermead(func, x0, args=(), callback=None,\n maxiter=None, maxfev=None, disp=False,\n return_all=False, initial_simplex=None,\n xatol=1e-4, fatol=1e-4, **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using the\n Nelder-Mead algorithm.\n Options\n -------\n disp : bool\n Set to True to print convergence messages.\n maxiter, maxfev : int\n Maximum allowed number of iterations and function evaluations.\n Will default to ``N*200``, where ``N`` is the number of\n variables, if neither `maxiter` or `maxfev` is set. If both\n `maxiter` and `maxfev` are set, minimization will stop at the\n first reached.\n initial_simplex : array_like of shape (N + 1, N)\n Initial simplex. If given, overrides `x0`.\n ``initial_simplex[j,:]`` should contain the coordinates of\n the j-th vertex of the ``N+1`` vertices in the simplex, where\n ``N`` is the dimension.\n xatol : float, optional\n Absolute error in xopt between iterations that is acceptable for\n convergence.\n fatol : number, optional\n Absolute error in func(xopt) between iterations that is acceptable for\n convergence.\n \"\"\"\n maxfun = maxfev\n retall = return_all\n\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n\n if initial_simplex is None:\n N = len(x0)\n\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n\n maxiter = 10\n maxfun = 10\n\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n # sort so sim[0,:] has the lowest function value\n sim = numpy.take(sim, ind, 0)\n raise Exception()\n print('aaaaffaaaaaa')\n\n iterations = 1\n\n while iterations < maxiter:\n if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and\n numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):\n break\n logger.debug('itr: %s' % iterations)\n print('aaaaaaaaaa')\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n\n result = OptimizeResult(fun=fval, nit=iterations, nfev=0,\n status=warnflag, success=(warnflag == 0),\n message=None, x=x, final_simplex=(sim, fsim))\n return result\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import Any, Callable, Generator, List, Optional
import pytest
from _pytest import nodes
from _pytest.config import hookimpl
from _pytest.python import Function, PyCollector # type: ignore
from hypothesis.errors import InvalidArgument # pylint: disable=ungrouped-imports
from .._hypothesis import create_test
from ..exceptions import InvalidSchema
from ..models import Endpoint
from ..utils import is_schemathesis_test
@hookimpl(hookwrapper=True) # type:ignore # pragma: no mutate
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Optional["SchemathesisCase"]:
"""Switch to a different collector if the test is parametrized marked by schemathesis."""
outcome = yield
if is_schemathesis_test(obj):
outcome.force_result(SchemathesisCase(obj, name, collector))
else:
outcome.get_result()
class SchemathesisCase(PyCollector):
def __init__(self, test_function: Callable, *args: Any, **kwargs: Any) -> None:
self.test_function = test_function
self.schemathesis_case = test_function._schemathesis_test # type: ignore
super().__init__(*args, **kwargs)
def _get_test_name(self, endpoint: Endpoint) -> str:
return f"{self.name}[{endpoint.method}:{endpoint.path}]"
def _gen_items(self, endpoint: Endpoint) -> Generator[Function, None, None]:
"""Generate all items for the given endpoint.
Could produce more than one test item if
parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
"""
try:
hypothesis_item = create_test(endpoint, self.test_function)
except InvalidSchema:
hypothesis_item = lambda: pytest.fail("Invalid schema for endpoint")
items = self.ihook.pytest_pycollect_makeitem(
collector=self.parent, name=self._get_test_name(endpoint), obj=hypothesis_item
)
for item in items:
item.obj = hypothesis_item
yield item
def collect(self) -> List[Function]: # type: ignore
"""Generate different test items for all endpoints available in the given schema."""
try:
return [
item for endpoint in self.schemathesis_case.get_all_endpoints() for item in self._gen_items(endpoint)
]
except Exception:
pytest.fail("Error during collection")
@hookimpl(hookwrapper=True) # pragma: no mutate
def pytest_pyfunc_call(pyfuncitem): # type:ignore
"""It is possible to have a Hypothesis exception in runtime.
For example - kwargs validation is failed for some strategy.
"""
outcome = yield
try:
outcome.get_result()
except InvalidArgument as exc:
pytest.fail(exc.args[0])
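# Rough sketch of the kind of test function this collector picks up; the
# schema's parametrize decorator is what sets the `_schemathesis_test`
# attribute that is_schemathesis_test() looks for (decorator and Case method
# names here are illustrative and depend on the schemathesis version):
#
#   @schema.parametrize()
#   def test_api(case):
#       case.call()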
|
normal
|
{
"blob_id": "2060f0af351c1487f8aa45943dbaa050f4291c58",
"index": 7791,
"step-1": "<mask token>\n\n\nclass SchemathesisCase(PyCollector):\n <mask token>\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-4": "from typing import Any, Callable, Generator, List, Optional\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.config import hookimpl\nfrom _pytest.python import Function, PyCollector\nfrom hypothesis.errors import InvalidArgument\nfrom .._hypothesis import create_test\nfrom ..exceptions import InvalidSchema\nfrom ..models import Endpoint\nfrom ..utils import is_schemathesis_test\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-5": "from typing import Any, Callable, Generator, List, Optional\n\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.config import hookimpl\nfrom _pytest.python import Function, PyCollector # type: ignore\nfrom hypothesis.errors import InvalidArgument # pylint: disable=ungrouped-imports\n\nfrom .._hypothesis import create_test\nfrom ..exceptions import InvalidSchema\nfrom ..models import Endpoint\nfrom ..utils import is_schemathesis_test\n\n\n@hookimpl(hookwrapper=True) # type:ignore # pragma: no mutate\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Optional[\"SchemathesisCase\"]:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any) -> None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test # type: ignore\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) -> str:\n return f\"{self.name}[{endpoint.method}:{endpoint.path}]\"\n\n def _gen_items(self, endpoint: Endpoint) -> Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda: pytest.fail(\"Invalid schema for endpoint\")\n items = self.ihook.pytest_pycollect_makeitem(\n collector=self.parent, name=self._get_test_name(endpoint), obj=hypothesis_item\n )\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) -> List[Function]: # type: ignore\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [\n item for endpoint in self.schemathesis_case.get_all_endpoints() for item in self._gen_items(endpoint)\n ]\n except Exception:\n pytest.fail(\"Error during collection\")\n\n\n@hookimpl(hookwrapper=True) # pragma: no mutate\ndef pytest_pyfunc_call(pyfuncitem): # type:ignore\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from data.dataframe_sequence_multi import DataFrameSequenceMulti
from metrics import Metrics
from models.models_ts_multi import lstm_model_multi
import threading
import sys
from keras import optimizers
from data.data_helper import plot_history
epochs = 100
start = 6
end = 18
res = []
sets = []
min_vals = []
min_loss = []
def run_final_all_days():
# onsite
# data = DataFrameSequenceMulti(False, True, False, False)
# onsite & img
# data = DataFrameSequenceMulti(False, True, True, False)
# all data
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
# sqs = [5, 10]
sqs=[5]
cams = [1]
permutations = [(True,True,True)]
# permutations = [(True, True, True), (True, False, False), (False, True, False)]
# permutations_names = ['all data', 'onsite_only', 'img only']
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams=c, clear_sky_label=True)
# data.normalize_mega_df()
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam, pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
# data.load_prev_mega_df()
data.build_ts_df(start, end, [7,8,9,10,11,12], s, cams=c, clear_sky_label=False)
# data.save_df()
else:
data.build_ts_df(start, end, [7,8,9,10], s, cams=c, clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time + name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_PREM2_PXL' + name_epoch + name_time + name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
# data.build_ts_df(6, 19, [7,8,9,10], 5)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
# plot_history('s1', 1, lstm.history)
# import matplotlib.pyplot as plt
# from matplotlib.lines import lineStyles
# plt.plot(lstm.history.history['loss'])
# plt.plot(lstm.history.history['val_loss'], linestyle=':')
# ymin = min(lstm.history.history['val_loss'])
# xpos = lstm.history.history['val_loss'].index(ymin)
# xmin = lstm.history.history['val_loss'][xpos]
# plt.annotate('Minimum validation loss', size=20, xy=(xpos, ymin), xytext=(xpos, ymin + 30000),
# arrowprops=dict(facecolor='black', shrink=0.05, width=5, headwidth=20),
# horizontalalignment='center', verticalalignment='top',
# )
# plt.ylim(0, 100000)
# plt.title('LSTM M 5 all data', size=20)
# plt.ylabel('Mean squared error', size=20)
# plt.xlabel('Epochs', size=20)
# plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
#
# Metrics.write_results_multi('LSTM_TEST_MULTI', data.test_x_df.reshape(
# (data.test_x_df.shape[0],
# data.sequence_len_minutes,
# data.number_of_features)),
# data.test_y_df, y_pred)
print(rmse)
def optimize():
# data.build_ts_df(6, 19, [8, 9, 10,11,12], 10, cams=1, clear_sky_label=False)
# data.normalize_mega_df()
# data.split_data_set(10,15)
# data.flatten_data_set_to_3d()
#
# seq_l = [3,5,10]
# nodes = [(50,25,10),(60,30,15),(80,40,20)]
# activations = ['relu', 'sigmoid']
# opts = ['Adam', 'RMSprop']
# learning_rate = [0.001, 0.01, 0.1]
seq_l = [5]
nodes = [(50,25,10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7,8,9,10,11,12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a) + ' optimizer: ' + str(o) + ' lr: ' + str(lr) + " seq_l: " + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history['loss'])))
run_lstm_experiment(set='test')
# run_final_test_days()
# run_final_all_days()
# LSTM_test()
|
normal
|
{
"blob_id": "af903feda57e4ace0c7f909abbeb86bb9a7e4d8c",
"index": 1806,
"step-1": "<mask token>\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\n<mask token>\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_final_all_days():\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n print(rmse)\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n 
min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\n<mask token>\n",
"step-3": "<mask token>\nepochs = 100\nstart = 6\nend = 18\nres = []\nsets = []\nmin_vals = []\nmin_loss = []\n\n\ndef run_final_all_days():\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n print(rmse)\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n 
plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\nrun_lstm_experiment(set='test')\n",
"step-4": "from data.dataframe_sequence_multi import DataFrameSequenceMulti\nfrom metrics import Metrics\nfrom models.models_ts_multi import lstm_model_multi\nimport threading\nimport sys\nfrom keras import optimizers\nfrom data.data_helper import plot_history\nepochs = 100\nstart = 6\nend = 18\nres = []\nsets = []\nmin_vals = []\nmin_loss = []\n\n\ndef run_final_all_days():\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n print(rmse)\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n 
lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\nrun_lstm_experiment(set='test')\n",
"step-5": "from data.dataframe_sequence_multi import DataFrameSequenceMulti\nfrom metrics import Metrics\nfrom models.models_ts_multi import lstm_model_multi\nimport threading\nimport sys\nfrom keras import optimizers\nfrom data.data_helper import plot_history\n\nepochs = 100\nstart = 6\nend = 18\n\nres = []\nsets = []\nmin_vals = []\nmin_loss = []\n\ndef run_final_all_days():\n # onsite\n # data = DataFrameSequenceMulti(False, True, False, False)\n # onsite & img\n # data = DataFrameSequenceMulti(False, True, True, False)\n # all data\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n\n lstm = lstm_model_multi.LSTM_predictor(data, epochs,\n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\ndef run_final_test_days():\n # sqs = [5, 10]\n sqs=[5]\n cams = [1]\n permutations = [(True,True,True)]\n # permutations = [(True, True, True), (True, False, False), (False, True, False)]\n # permutations_names = ['all data', 'onsite_only', 'img only']\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams=c, clear_sky_label=True)\n # data.normalize_mega_df()\n data.scale_mega(model='lstm')\n\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n\n lstm = lstm_model_multi.LSTM_predictor(data, epochs,\n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam, pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n # data.load_prev_mega_df()\n data.build_ts_df(start, end, [7,8,9,10,11,12], s, cams=c, clear_sky_label=False)\n # data.save_df()\n else:\n data.build_ts_df(start, end, [7,8,9,10], s, cams=c, clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100,\n 'LSTM_TEST_PXL' + name_epoch + name_time + name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_PREM2_PXL' + name_epoch + name_time + name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n # data.build_ts_df(6, 19, [7,8,9,10], 5)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n # plot_history('s1', 1, lstm.history)\n\n # import matplotlib.pyplot as plt\n # from matplotlib.lines import lineStyles\n # plt.plot(lstm.history.history['loss'])\n # 
plt.plot(lstm.history.history['val_loss'], linestyle=':')\n # ymin = min(lstm.history.history['val_loss'])\n # xpos = lstm.history.history['val_loss'].index(ymin)\n # xmin = lstm.history.history['val_loss'][xpos]\n # plt.annotate('Minimum validation loss', size=20, xy=(xpos, ymin), xytext=(xpos, ymin + 30000),\n # arrowprops=dict(facecolor='black', shrink=0.05, width=5, headwidth=20),\n # horizontalalignment='center', verticalalignment='top',\n # )\n # plt.ylim(0, 100000)\n # plt.title('LSTM M 5 all data', size=20)\n # plt.ylabel('Mean squared error', size=20)\n # plt.xlabel('Epochs', size=20)\n # plt.legend(['train', 'validation'], loc='upper left')\n # plt.show()\n #\n # Metrics.write_results_multi('LSTM_TEST_MULTI', data.test_x_df.reshape(\n # (data.test_x_df.shape[0],\n # data.sequence_len_minutes,\n # data.number_of_features)),\n # data.test_y_df, y_pred)\n\n print(rmse)\n\ndef optimize():\n # data.build_ts_df(6, 19, [8, 9, 10,11,12], 10, cams=1, clear_sky_label=False)\n # data.normalize_mega_df()\n # data.split_data_set(10,15)\n # data.flatten_data_set_to_3d()\n #\n # seq_l = [3,5,10]\n # nodes = [(50,25,10),(60,30,15),(80,40,20)]\n # activations = ['relu', 'sigmoid']\n # opts = ['Adam', 'RMSprop']\n # learning_rate = [0.001, 0.01, 0.1]\n\n\n seq_l = [5]\n nodes = [(50,25,10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7,8,9,10,11,12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a) + ' optimizer: ' + str(o) + ' lr: ' + str(lr) + \" seq_l: \" + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[best_val_loss].history['val_loss'])))\n\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history['loss'])))\n\nrun_lstm_experiment(set='test')\n# run_final_test_days()\n# run_final_all_days()\n# LSTM_test()",
"step-ids": [
3,
5,
7,
8,
9
]
}
|
[
3,
5,
7,
8,
9
] |
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.__version__
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
import pathlib
from IPython import display
###------------------------------------------------------###
# READ IN IMAGE DATA
#(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
AUTOTUNE = tf.data.experimental.AUTOTUNE
data_root_orig = "Images"
data_root = pathlib.Path(data_root_orig)
#print(data_root)
#for item in data_root.iterdir():
# print(item)
import random
# Changed from orginal cause maybe a problem with the windows file system
#all_image_paths = list(data_root.glob('*/*'))
all_image_paths = list(data_root.glob('*'))
#print(all_image_paths)
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
#print(image_count)
# No good PATH format
# print(all_image_paths[:10])
img_path = all_image_paths[0]
#print(img_path)
img_raw = tf.io.read_file(img_path)
#print(repr(img_raw)[:100]+"...")
img_tensor = tf.image.decode_image(img_raw)
#print(img_tensor.shape)
#print(img_tensor.dtype)
img_final = tf.image.resize(img_tensor, [280, 280])
img_final = img_final/255.0
#print(img_final.shape)
#print(img_final.numpy().min())
#print(img_final.numpy().max())
#-----------------------------------------#
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [280, 280])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.io.read_file(path)
return preprocess_image(image)
#-----------------------------------------#
# BUILD A DATASET
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
#print(path_ds)
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
#print(ds)
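
# The section below builds a MobileNetV2 feature extractor with a small
# classification head on top of the same image pipeline. It is independent of
# the DCGAN defined further down, and its training call (model.fit) is left
# commented out.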
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(280, 280, 3), include_top=False)
mobile_net.trainable=False
help(tf.keras.applications.mobilenet_v2.preprocess_input)
def change_range(image):
return 2*image-1
keras_ds = ds.map(change_range)
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
#print(feature_map_batch.shape)
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense((image_count))])
logit_batch = model(image_batch).numpy()
#print("min logit:", logit_batch.min())
#print("max logit:", logit_batch.max())
#print()
#print("Shape:", logit_batch.shape)
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='sparse_categorical_crossentropy',
metrics=["accuracy"])
#print(len(model.trainable_variables))
model.summary()
steps_per_epoch=tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
#print(steps_per_epoch)
#model.fit(ds, epochs=1, steps_per_epoch=3)
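
# --- DCGAN section ---
# The generator below upsamples a 100-dimensional noise vector to a 28x28x1
# image (tanh output in [-1, 1]) through transposed convolutions; the matching
# discriminator follows it.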
def make_generator_model():
model = tf.keras.Sequential()
model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Reshape((7, 7, 256)))
assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size
model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
assert model.output_shape == (None, 7, 7, 128)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 14, 14, 64)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
assert model.output_shape == (None, 28, 28, 1)
return model
generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
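# The generator is untrained at this point, so the rendered image is essentially noise.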
def make_discriminator_model():
model = tf.keras.Sequential()
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
input_shape=[28, 28, 1]))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print(decision)
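# `decision` is a raw logit (the final Dense(1) layer has no activation);
# positive values lean "real", negative values lean "fake".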
# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
# We will reuse this seed overtime (so it's easier)
# to visualize progress in the animated GIF)
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
noise = tf.random.normal([BATCH_SIZE, noise_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
for epoch in range(epochs):
start = time.time()
for image_batch in dataset:
train_step(image_batch)
# Produce images for the GIF as we go
display.clear_output(wait=True)
generate_and_save_images(generator,
epoch + 1,
seed)
# Save the model every 15 epochs
if (epoch + 1) % 15 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
# Generate after the final epoch
display.clear_output(wait=True)
generate_and_save_images(generator,
epochs,
seed)
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4,4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
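
# --- Minimal sketch of a GAN-compatible input pipeline ---
# The generator/discriminator above expect 28x28x1 images in [-1, 1], while `ds`
# yields repeating 280x280x3 batches in [0, 1], so it cannot feed the GAN
# training loop directly. The pipeline below (the names `preprocess_for_gan` and
# `gan_ds` are illustrative, not from the original script) adapts the same image
# files to that shape; the training call underneath uses it.
def preprocess_for_gan(image):
    image = tf.image.rgb_to_grayscale(image)   # 3 channels -> 1
    image = tf.image.resize(image, [28, 28])   # match the DCGAN input size
    return image * 2.0 - 1.0                   # rescale [0, 1] -> [-1, 1] for the tanh output

gan_ds = (image_ds.map(preprocess_for_gan, num_parallel_calls=AUTOTUNE)
          .shuffle(buffer_size=image_count)
          .batch(BATCH_SIZE, drop_remainder=True)
          .prefetch(buffer_size=AUTOTUNE))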
# time the full training run on the adapted pipeline defined above
train(gan_ds, EPOCHS)
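
# Checkpoints are saved every 15 epochs inside train(); a later session could
# resume from the most recent one with, e.g.:
# checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))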
|
normal
|
{
"blob_id": "e007e2d32fa799e7658813f36911616f7bf58b48",
"index": 3972,
"step-1": "<mask token>\n\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [280, 280])\n image /= 255.0\n return image\n\n\n<mask token>\n\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Reshape((7, 7, 256)))\n assert model.output_shape == (None, 7, 7, 256)\n model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 28, 28, 1)\n return model\n\n\n<mask token>\n\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n input_shape=[28, 28, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n return model\n\n\n<mask token>\n\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n\n<mask token>\n\n\[email protected]\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim])\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.\n trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss,\n discriminator.trainable_variables)\n generator_optimizer.apply_gradients(zip(gradients_of_generator,\n generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,\n discriminator.trainable_variables))\n\n\ndef train(dataset, epochs):\n for epoch in range(epochs):\n start = time.time()\n for image_batch in dataset:\n train_step(image_batch)\n display.clear_output(wait=True)\n generate_and_save_images(generator, epoch + 1, seed)\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix=checkpoint_prefix)\n print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() -\n start))\n display.clear_output(wait=True)\n generate_and_save_images(generator, epochs, seed)\n\n\ndef generate_and_save_images(model, epoch, test_input):\n predictions = model(test_input, training=False)\n fig = plt.figure(figsize=(4, 4))\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i + 1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, 
cmap='gray')\n plt.axis('off')\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [280, 280])\n image /= 255.0\n return image\n\n\ndef load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)\n\n\n<mask token>\n\n\ndef change_range(image):\n return 2 * image - 1\n\n\n<mask token>\n\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Reshape((7, 7, 256)))\n assert model.output_shape == (None, 7, 7, 256)\n model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 28, 28, 1)\n return model\n\n\n<mask token>\n\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n input_shape=[28, 28, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n return model\n\n\n<mask token>\n\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n\n<mask token>\n\n\[email protected]\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim])\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.\n trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss,\n discriminator.trainable_variables)\n generator_optimizer.apply_gradients(zip(gradients_of_generator,\n generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,\n discriminator.trainable_variables))\n\n\ndef train(dataset, epochs):\n for epoch in range(epochs):\n start = time.time()\n for image_batch in dataset:\n train_step(image_batch)\n display.clear_output(wait=True)\n generate_and_save_images(generator, epoch + 1, seed)\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix=checkpoint_prefix)\n print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() -\n start))\n display.clear_output(wait=True)\n generate_and_save_images(generator, epochs, seed)\n\n\ndef generate_and_save_images(model, epoch, test_input):\n predictions = 
model(test_input, training=False)\n fig = plt.figure(figsize=(4, 4))\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i + 1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.axis('off')\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\ntf.__version__\n<mask token>\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n<mask token>\ndata_root_orig = 'Images'\ndata_root = pathlib.Path(data_root_orig)\n<mask token>\nall_image_paths = list(data_root.glob('*'))\nall_image_paths = [str(path) for path in all_image_paths]\nrandom.shuffle(all_image_paths)\nimage_count = len(all_image_paths)\nimg_path = all_image_paths[0]\nimg_raw = tf.io.read_file(img_path)\nimg_tensor = tf.image.decode_image(img_raw)\nimg_final = tf.image.resize(img_tensor, [280, 280])\nimg_final = img_final / 255.0\n\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [280, 280])\n image /= 255.0\n return image\n\n\ndef load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)\n\n\npath_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)\nimage_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)\nBATCH_SIZE = 32\nds = image_ds.shuffle(buffer_size=image_count)\nds = ds.repeat()\nds = ds.batch(BATCH_SIZE)\nds = ds.prefetch(buffer_size=AUTOTUNE)\nmobile_net = tf.keras.applications.MobileNetV2(input_shape=(280, 280, 3),\n include_top=False)\nmobile_net.trainable = False\nhelp(tf.keras.applications.mobilenet_v2.preprocess_input)\n\n\ndef change_range(image):\n return 2 * image - 1\n\n\nkeras_ds = ds.map(change_range)\nimage_batch = next(iter(keras_ds))\nfeature_map_batch = mobile_net(image_batch)\nmodel = tf.keras.Sequential([mobile_net, tf.keras.layers.\n GlobalAveragePooling2D(), tf.keras.layers.Dense(image_count)])\nlogit_batch = model(image_batch).numpy()\nmodel.compile(optimizer=tf.keras.optimizers.Adam(), loss=\n 'sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.summary()\nsteps_per_epoch = tf.math.ceil(len(all_image_paths) / BATCH_SIZE).numpy()\n\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Reshape((7, 7, 256)))\n assert model.output_shape == (None, 7, 7, 256)\n model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 28, 28, 1)\n return model\n\n\ngenerator = make_generator_model()\nnoise = tf.random.normal([1, 100])\ngenerated_image = generator(noise, training=False)\nplt.imshow(generated_image[0, :, :, 0], cmap='gray')\n\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n input_shape=[28, 28, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n return model\n\n\ndiscriminator = make_discriminator_model()\ndecision = discriminator(generated_image)\nprint(decision)\ncross_entropy = 
tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n\ngenerator_optimizer = tf.keras.optimizers.Adam(0.0001)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(0.0001)\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer, generator=generator,\n discriminator=discriminator)\nEPOCHS = 50\nnoise_dim = 100\nnum_examples_to_generate = 16\nseed = tf.random.normal([num_examples_to_generate, noise_dim])\n\n\[email protected]\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim])\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.\n trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss,\n discriminator.trainable_variables)\n generator_optimizer.apply_gradients(zip(gradients_of_generator,\n generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,\n discriminator.trainable_variables))\n\n\ndef train(dataset, epochs):\n for epoch in range(epochs):\n start = time.time()\n for image_batch in dataset:\n train_step(image_batch)\n display.clear_output(wait=True)\n generate_and_save_images(generator, epoch + 1, seed)\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix=checkpoint_prefix)\n print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() -\n start))\n display.clear_output(wait=True)\n generate_and_save_images(generator, epochs, seed)\n\n\ndef generate_and_save_images(model, epoch, test_input):\n predictions = model(test_input, training=False)\n fig = plt.figure(figsize=(4, 4))\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i + 1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.axis('off')\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()\n\n\ntrain(ds, EPOCHS)\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\ntf.__version__\nimport glob\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport PIL\nfrom tensorflow.keras import layers\nimport time\nimport pathlib\nfrom IPython import display\nAUTOTUNE = tf.data.experimental.AUTOTUNE\nimport pathlib\ndata_root_orig = 'Images'\ndata_root = pathlib.Path(data_root_orig)\nimport random\nall_image_paths = list(data_root.glob('*'))\nall_image_paths = [str(path) for path in all_image_paths]\nrandom.shuffle(all_image_paths)\nimage_count = len(all_image_paths)\nimg_path = all_image_paths[0]\nimg_raw = tf.io.read_file(img_path)\nimg_tensor = tf.image.decode_image(img_raw)\nimg_final = tf.image.resize(img_tensor, [280, 280])\nimg_final = img_final / 255.0\n\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [280, 280])\n image /= 255.0\n return image\n\n\ndef load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)\n\n\npath_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)\nimage_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)\nBATCH_SIZE = 32\nds = image_ds.shuffle(buffer_size=image_count)\nds = ds.repeat()\nds = ds.batch(BATCH_SIZE)\nds = ds.prefetch(buffer_size=AUTOTUNE)\nmobile_net = tf.keras.applications.MobileNetV2(input_shape=(280, 280, 3),\n include_top=False)\nmobile_net.trainable = False\nhelp(tf.keras.applications.mobilenet_v2.preprocess_input)\n\n\ndef change_range(image):\n return 2 * image - 1\n\n\nkeras_ds = ds.map(change_range)\nimage_batch = next(iter(keras_ds))\nfeature_map_batch = mobile_net(image_batch)\nmodel = tf.keras.Sequential([mobile_net, tf.keras.layers.\n GlobalAveragePooling2D(), tf.keras.layers.Dense(image_count)])\nlogit_batch = model(image_batch).numpy()\nmodel.compile(optimizer=tf.keras.optimizers.Adam(), loss=\n 'sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.summary()\nsteps_per_epoch = tf.math.ceil(len(all_image_paths) / BATCH_SIZE).numpy()\n\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Reshape((7, 7, 256)))\n assert model.output_shape == (None, 7, 7, 256)\n model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding=\n 'same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 28, 28, 1)\n return model\n\n\ngenerator = make_generator_model()\nnoise = tf.random.normal([1, 100])\ngenerated_image = generator(noise, training=False)\nplt.imshow(generated_image[0, :, :, 0], cmap='gray')\n\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n input_shape=[28, 28, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), 
padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n return model\n\n\ndiscriminator = make_discriminator_model()\ndecision = discriminator(generated_image)\nprint(decision)\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n\ngenerator_optimizer = tf.keras.optimizers.Adam(0.0001)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(0.0001)\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer, generator=generator,\n discriminator=discriminator)\nEPOCHS = 50\nnoise_dim = 100\nnum_examples_to_generate = 16\nseed = tf.random.normal([num_examples_to_generate, noise_dim])\n\n\[email protected]\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim])\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.\n trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss,\n discriminator.trainable_variables)\n generator_optimizer.apply_gradients(zip(gradients_of_generator,\n generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,\n discriminator.trainable_variables))\n\n\ndef train(dataset, epochs):\n for epoch in range(epochs):\n start = time.time()\n for image_batch in dataset:\n train_step(image_batch)\n display.clear_output(wait=True)\n generate_and_save_images(generator, epoch + 1, seed)\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix=checkpoint_prefix)\n print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() -\n start))\n display.clear_output(wait=True)\n generate_and_save_images(generator, epochs, seed)\n\n\ndef generate_and_save_images(model, epoch, test_input):\n predictions = model(test_input, training=False)\n fig = plt.figure(figsize=(4, 4))\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i + 1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.axis('off')\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()\n\n\ntrain(ds, EPOCHS)\n",
"step-5": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\ntf.__version__\nimport glob\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport PIL\nfrom tensorflow.keras import layers\nimport time\nimport pathlib\nfrom IPython import display\n\n\n###------------------------------------------------------###\n# READ IN IMAGE DATA\n\n#(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\nimport pathlib\ndata_root_orig = \"Images\"\ndata_root = pathlib.Path(data_root_orig)\n#print(data_root)\n\n#for item in data_root.iterdir():\n# print(item)\n\nimport random\n# Changed from orginal cause maybe a problem with the windows file system\n#all_image_paths = list(data_root.glob('*/*'))\nall_image_paths = list(data_root.glob('*'))\n#print(all_image_paths)\n\nall_image_paths = [str(path) for path in all_image_paths]\nrandom.shuffle(all_image_paths)\n\nimage_count = len(all_image_paths)\n#print(image_count)\n# No good PATH format\n# print(all_image_paths[:10])\n\nimg_path = all_image_paths[0]\n#print(img_path)\n\nimg_raw = tf.io.read_file(img_path)\n#print(repr(img_raw)[:100]+\"...\")\n\nimg_tensor = tf.image.decode_image(img_raw)\n#print(img_tensor.shape)\n#print(img_tensor.dtype)\n\nimg_final = tf.image.resize(img_tensor, [280, 280])\nimg_final = img_final/255.0\n#print(img_final.shape)\n#print(img_final.numpy().min())\n#print(img_final.numpy().max())\n\n#-----------------------------------------#\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [280, 280])\n image /= 255.0 # normalize to [0,1] range\n\n return image\n\ndef load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)\n\n#-----------------------------------------#\n# BUILD A DATASET\npath_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)\n#print(path_ds)\n\nimage_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)\n\nBATCH_SIZE = 32\n\n# Setting a shuffle buffer size as large as the dataset ensures that the data is\n# completely shuffled.\nds = image_ds.shuffle(buffer_size=image_count)\nds = ds.repeat()\nds = ds.batch(BATCH_SIZE)\n# `prefetch` lets the dataset fetch batches in the background while the model is training.\nds = ds.prefetch(buffer_size=AUTOTUNE)\n#print(ds)\n\nmobile_net = tf.keras.applications.MobileNetV2(input_shape=(280, 280, 3), include_top=False)\nmobile_net.trainable=False\nhelp(tf.keras.applications.mobilenet_v2.preprocess_input)\n\ndef change_range(image):\n return 2*image-1\n\nkeras_ds = ds.map(change_range)\n\n# The dataset may take a few seconds to start, as it fills its shuffle buffer.\nimage_batch = next(iter(keras_ds))\nfeature_map_batch = mobile_net(image_batch)\n#print(feature_map_batch.shape)\n\n\nmodel = tf.keras.Sequential([\n mobile_net,\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Dense((image_count))])\n\nlogit_batch = model(image_batch).numpy()\n\n#print(\"min logit:\", logit_batch.min())\n#print(\"max logit:\", logit_batch.max())\n#print()\n\n#print(\"Shape:\", logit_batch.shape)\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(),\n loss='sparse_categorical_crossentropy',\n metrics=[\"accuracy\"])\n\n#print(len(model.trainable_variables))\n\nmodel.summary()\n\nsteps_per_epoch=tf.math.ceil(len(all_image_paths)/BATCH_SIZE).numpy()\n#print(steps_per_epoch)\n\n#model.fit(ds, epochs=1, 
steps_per_epoch=3)\n\n\n\ndef make_generator_model():\n model = tf.keras.Sequential()\n model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Reshape((7, 7, 256)))\n assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size\n\n model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU())\n\n model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 28, 28, 1)\n\n return model\n\ngenerator = make_generator_model()\n\nnoise = tf.random.normal([1, 100])\ngenerated_image = generator(noise, training=False)\n\nplt.imshow(generated_image[0, :, :, 0], cmap='gray')\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n input_shape=[28, 28, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n\n return model\n\ndiscriminator = make_discriminator_model()\ndecision = discriminator(generated_image)\nprint (decision)\n\n\n# This method returns a helper function to compute cross entropy loss\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\ngenerator_optimizer = tf.keras.optimizers.Adam(1e-4)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(1e-4)\n\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator)\n\n\nEPOCHS = 50\nnoise_dim = 100\nnum_examples_to_generate = 16\n\n# We will reuse this seed overtime (so it's easier)\n# to visualize progress in the animated GIF)\nseed = tf.random.normal([num_examples_to_generate, noise_dim])\n\n# Notice the use of `tf.function`\n# This annotation causes the function to be \"compiled\".\[email protected]\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim])\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss, 
discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n\ndef train(dataset, epochs):\n for epoch in range(epochs):\n start = time.time()\n\n for image_batch in dataset:\n train_step(image_batch)\n\n # Produce images for the GIF as we go\n display.clear_output(wait=True)\n generate_and_save_images(generator,\n epoch + 1,\n seed)\n\n # Save the model every 15 epochs\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix = checkpoint_prefix)\n\n print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))\n\n # Generate after the final epoch\n display.clear_output(wait=True)\n generate_and_save_images(generator,\n epochs,\n seed)\n\n\ndef generate_and_save_images(model, epoch, test_input):\n # Notice `training` is set to False.\n # This is so all layers run in inference mode (batchnorm).\n predictions = model(test_input, training=False)\n\n fig = plt.figure(figsize=(4,4))\n\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i+1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.axis('off')\n\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()\n\n#time\ntrain(ds, EPOCHS)",
"step-ids": [
8,
10,
12,
13,
14
]
}
|
[
8,
10,
12,
13,
14
] |
class Book:
"""Class that defines book model."""
def __init__(self, title, authors, pub_year):
self.title = title
self.authors = authors
self.pub_year = pub_year
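
# Illustrative usage (assumes `authors` is a list of author names):
# book = Book("Dune", ["Frank Herbert"], 1965)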
|
normal
|
{
"blob_id": "14345a8c4e20d84dfc87476d890f59530a8f4d96",
"index": 7237,
"step-1": "<mask token>\n",
"step-2": "class Book:\n <mask token>\n <mask token>\n",
"step-3": "class Book:\n <mask token>\n\n def __init__(self, title, authors, pub_year):\n self.title = title\n self.authors = authors\n self.pub_year = pub_year\n",
"step-4": "class Book:\n \"\"\"Class that defines book model.\"\"\"\n\n def __init__(self, title, authors, pub_year):\n self.title = title\n self.authors = authors\n self.pub_year = pub_year\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest
import hospital.employee.nurse as n
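# Unit tests for the Nurse class: constructor fields, the display() text,
# phone-number and salary updates (including an invalid salary), and the bonus rule.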
class TestNurse(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('Start testing nurse')
def setUp(self):
self.n1 = n.Nurse('Tess',18,"5436890982",3200,25)
self.n2 = n.Nurse('Melissa',40,"8920953924",9000,5)
def test_init(self):
self.assertEqual(self.n1.name,"Tess")
self.assertEqual(self.n1.age,18)
self.assertEqual(self.n1.phone_num,"5436890982")
self.assertEqual(self.n1.salary,3200)
self.assertEqual(self.n1.number_treated,25)
def test_display(self):
self.assertEqual(self.n1.display(),"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n".format('Tess',18,"5436890982",3200,25))
def test_change_in_phone_num(self):
self.n1.change_in_phone_num("1234567890")
self.n2.change_in_phone_num("0987654321")
self.assertEqual(self.n1.phone_num,"1234567890")
self.assertEqual(self.n2.phone_num,"0987654321")
self.n1.change_in_phone_num("3254678313")
self.n2.change_in_phone_num("0928495820")
self.assertEqual(self.n1.phone_num,"3254678313")
self.assertEqual(self.n2.phone_num,"0928495820")
def test_change_in_salary(self):
self.n1.change_in_salary(9000)
self.n2.change_in_salary(10000)
self.assertEqual(self.n1.salary,9000)
self.assertEqual(self.n1.change_in_salary(-50),"Invalid salary.")
self.assertEqual(self.n2.salary,10000)
self.n1.change_in_salary(20)
self.assertEqual(self.n1.salary,20)
def test_bonus(self):
self.n1.bonus()
self.n2.bonus()
self.assertEqual(self.n1.salary,3450)
self.assertEqual(self.n2.salary,9050)
def tearDown(self):
self.n1 = None
self.n2 = None
@classmethod
def tearDownClass(cls):
print("Finish test nurse")
unittest.main(argv=[''], verbosity=2, exit=False)
|
normal
|
{
"blob_id": "f24075ea70851ce95bb6b3cd87b6417f8141d546",
"index": 9112,
"step-1": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n <mask token>\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n <mask token>\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n\n @classmethod\n def tearDownClass(cls):\n print('Finish test nurse')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n\n def test_init(self):\n self.assertEqual(self.n1.name, 'Tess')\n self.assertEqual(self.n1.age, 18)\n self.assertEqual(self.n1.phone_num, '5436890982')\n self.assertEqual(self.n1.salary, 3200)\n self.assertEqual(self.n1.number_treated, 25)\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n\n @classmethod\n def tearDownClass(cls):\n print('Finish test nurse')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestNurse(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n print('Start testing nurse')\n\n def setUp(self):\n self.n1 = n.Nurse('Tess', 18, '5436890982', 3200, 25)\n self.n2 = n.Nurse('Melissa', 40, '8920953924', 9000, 5)\n\n def test_init(self):\n self.assertEqual(self.n1.name, 'Tess')\n self.assertEqual(self.n1.age, 18)\n self.assertEqual(self.n1.phone_num, '5436890982')\n self.assertEqual(self.n1.salary, 3200)\n self.assertEqual(self.n1.number_treated, 25)\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\n \"\"\"Nurse {} is {} years old. \nThe best number to reach out is {}. \nThe nurse's salary is {}. \nThe nurse has treated {} patients.\n\"\"\"\n .format('Tess', 18, '5436890982', 3200, 25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num('1234567890')\n self.n2.change_in_phone_num('0987654321')\n self.assertEqual(self.n1.phone_num, '1234567890')\n self.assertEqual(self.n2.phone_num, '0987654321')\n self.n1.change_in_phone_num('3254678313')\n self.n2.change_in_phone_num('0928495820')\n self.assertEqual(self.n1.phone_num, '3254678313')\n self.assertEqual(self.n2.phone_num, '0928495820')\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary, 9000)\n self.assertEqual(self.n1.change_in_salary(-50), 'Invalid salary.')\n self.assertEqual(self.n2.salary, 10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary, 20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary, 3450)\n self.assertEqual(self.n2.salary, 9050)\n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n\n @classmethod\n def tearDownClass(cls):\n print('Finish test nurse')\n\n\nunittest.main(argv=[''], verbosity=2, exit=False)\n",
"step-5": "import unittest\nimport hospital.employee.nurse as n\n\nclass TestNurse(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n print('Start testing nurse')\n \n def setUp(self):\n self.n1 = n.Nurse('Tess',18,\"5436890982\",3200,25)\n self.n2 = n.Nurse('Melissa',40,\"8920953924\",9000,5)\n\n def test_init(self):\n self.assertEqual(self.n1.name,\"Tess\")\n self.assertEqual(self.n1.age,18)\n self.assertEqual(self.n1.phone_num,\"5436890982\")\n self.assertEqual(self.n1.salary,3200)\n self.assertEqual(self.n1.number_treated,25)\n\n def test_display(self):\n self.assertEqual(self.n1.display(),\"Nurse {} is {} years old. \\nThe best number to reach out is {}. \\nThe nurse's salary is {}. \\nThe nurse has treated {} patients.\\n\".format('Tess',18,\"5436890982\",3200,25))\n\n def test_change_in_phone_num(self):\n self.n1.change_in_phone_num(\"1234567890\")\n self.n2.change_in_phone_num(\"0987654321\")\n self.assertEqual(self.n1.phone_num,\"1234567890\")\n self.assertEqual(self.n2.phone_num,\"0987654321\")\n self.n1.change_in_phone_num(\"3254678313\")\n self.n2.change_in_phone_num(\"0928495820\")\n self.assertEqual(self.n1.phone_num,\"3254678313\")\n self.assertEqual(self.n2.phone_num,\"0928495820\")\n\n def test_change_in_salary(self):\n self.n1.change_in_salary(9000)\n self.n2.change_in_salary(10000)\n self.assertEqual(self.n1.salary,9000)\n self.assertEqual(self.n1.change_in_salary(-50),\"Invalid salary.\")\n self.assertEqual(self.n2.salary,10000)\n self.n1.change_in_salary(20)\n self.assertEqual(self.n1.salary,20)\n\n def test_bonus(self):\n self.n1.bonus()\n self.n2.bonus()\n self.assertEqual(self.n1.salary,3450)\n self.assertEqual(self.n2.salary,9050)\n \n\n def tearDown(self):\n self.n1 = None\n self.n2 = None\n \n @classmethod\n def tearDownClass(cls):\n print(\"Finish test nurse\")\n\nunittest.main(argv=[''], verbosity=2, exit=False)\n",
"step-ids": [
7,
8,
9,
11,
13
]
}
|
[
7,
8,
9,
11,
13
] |
from django.contrib import admin
from lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint
# list_display - Show these fields for each model on the Admin site
# search_fields - Allow searching in these fields
# Register models for the Admin site
class ProgrammingEnvironmentAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('environment_name', 'description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LanguageAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('language_name', 'description', 'environment')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('lesson_number', 'lesson_title', 'language', 'lesson_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonHintAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('hint_title', 'lesson', 'hint_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
admin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Lesson, LessonAdmin)
admin.site.register(LessonHint, LessonHintAdmin)
|
normal
|
{
"blob_id": "2500c3562819e4e85ce3cbc30e0ddf1b8437e0a2",
"index": 6448,
"step-1": "<mask token>\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n <mask token>\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProgrammingEnvironmentAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'environment_name', 'description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\n<mask token>\n",
"step-4": "from django.contrib import admin\nfrom lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint\n\n\nclass ProgrammingEnvironmentAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'environment_name', 'description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nadmin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)\nadmin.site.register(Language, LanguageAdmin)\nadmin.site.register(Lesson, LessonAdmin)\nadmin.site.register(LessonHint, LessonHintAdmin)\n",
"step-5": "from django.contrib import admin\nfrom lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint\n\n# list_display - Show these fields for each model on the Admin site\n# search_fields - Allow searching in these fields\n\n# Register models for the Admin site\nclass ProgrammingEnvironmentAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('environment_name', 'description')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('language_name', 'description', 'environment')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language', 'lesson_description')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('hint_title', 'lesson', 'hint_description')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nadmin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)\nadmin.site.register(Language, LanguageAdmin)\nadmin.site.register(Lesson, LessonAdmin)\nadmin.site.register(LessonHint, LessonHintAdmin)",
"step-ids": [
8,
9,
12,
14,
15
]
}
|
[
8,
9,
12,
14,
15
] |
from datetime import datetime
import struct
BEACON_LENGTH = 84
EPS_LENGTH = 20
COM_LENGTH = 10
# reverse engineered
ADCS1_LENGTH = 7
ADCS2_LENGTH = 6
AIS_LENGTH = 20
class EPS(object):
def __init__(self, eps_data):
if len(eps_data) != EPS_LENGTH:
raise InputException(len(eps_data), EPS_LENGTH)
self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status,\
self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\
self.temp, self.pa_temp, self.main_voltage = struct.unpack(">HIIBHBbbBbbb", eps_data)
self.battery_voltage *= 40
self.cell_diff *= 4
self.battery_current *= 10
self.solar_power *= 20
def __str__(self):
eps_str = ("""EPS:
Boot count:\t\t{0}
Up time:\t\t{1} seconds
Real time clock:\t{2}
Battery voltage:\t{3} mV
Cell difference:\t{4:.1f} mV
Battery current:\t{5} mA
Solar power:\t\t{6}
Temperature:\t\t{7} C
PA temperature:\t\t{8} C""".format(
self.boot_count, self.uptime, datetime.fromtimestamp(self.rt_clock),
self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,
self.temp, self.pa_temp))
return eps_str
class COM(object):
def __init__(self, com_data):
self.boot_count, self.packets_received, self.packets_send, self.latest_rssi,\
self.latest_bit_correction, self.latest_byte_correction = \
struct.unpack(">HHHhBB", com_data)
self.boot_count &= 0x1fff
def __str__(self):
com_str = ("""COM:
Boot count:\t\t{0}
Packets received:\t{1}
Packets send:\t\t{2}
Latest rssi:\t\t{3}
Latest bit corrections:\t{4}
Latest byte corrections:{5}""".format(
self.boot_count, self.packets_received, self.packets_send,
self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction))
return com_str
# Reverse engineered classes
class ADCS1(object):
def __init__(self, adcs1_data):
data = struct.unpack(">hhhB", adcs1_data)
self.bdot = tuple(data[0:3])
self.state = data[3]
def __str__(self):
adcs1_str = ("""ADCS1:
State:\t{}
Bdot:\t{}""".format(self.state, self.bdot))
return adcs1_str
class ADCS2(object):
def __init__(self, adcs2_data):
self.gyro = tuple(struct.unpack(">hhh", adcs2_data))
def __str__(self):
adcs2_str = ("""ADCS2:
Gyro:\t{}""".format(self.gyro))
return adcs2_str
class AIS(object):
def __init__(self, ais_data):
# there are some fields which apparently are 0 all the time
# this fields can't be identified by reverse engineering
self.boot_count, _, _, self.unique_mssi, _ = struct.unpack(">HhhH12s", ais_data)
def __str__(self):
ais_str = ("""AIS:
Boot count:\t{}
Unique MSSI:\t{}""".format(self.boot_count, self.unique_mssi))
return ais_str
## Beacon
# The beacon class takes a string of bytes as input, and parses it to generate
# a representation of the beacon format used by AASUAT4
# The beacon format is as follows:
# [ 1 byte | 19 bytes | 12 bytes | 7 bytes | 6 bytes | 20 bytes | 20 bytes ]
# [ Valid | EPS | COM | ADCS1 | ADCS2 | AIS1 | AIS2 ]
# This is not correct EPS is 20 bytes and COM is 10 bytes
# The remaining fields seem to have the correct length
#
# For each subsystem, which are valid, are the corresponding data bytes passed to another
# class which parses the information.
#
# The __str__ method returns a human readable string with key information from the beacon
class Beacon(object):
def __init__(self, raw_data):
if len(raw_data) != BEACON_LENGTH:
raise ValueError("Malformed beacon (incorrect length)")
self.subsystems = {}
valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw = \
struct.unpack(("B"+"{}s"*6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)
# reverse engineered valid bits
# EPS and COM are known from university team code
# valid byte is usually 0x27
# in DK3WN's blog we see that EPS, COM, AIS2 and ADCS1 are valid
eps_valid = valid & (1 << 0)
com_valid = valid & (1 << 1)
adcs1_valid = valid & (1 << 2)
adcs2_valid = valid & (1 << 3)
ais1_valid = valid & (1 << 4)
ais2_valid = valid & (1 << 5)
if eps_valid:
self.subsystems['EPS'] = EPS(eps_raw)
if com_valid:
self.subsystems['COM'] = COM(com_raw)
if adcs1_valid:
self.subsystems['ADCS1'] = ADCS1(adcs1_raw)
if adcs2_valid:
self.subsystems['ADCS2'] = ADCS2(adcs2_raw)
if ais1_valid:
self.subsystems['AIS1'] = AIS(ais1_raw)
if ais2_valid:
self.subsystems['AIS2'] = AIS(ais2_raw)
def __str__(self):
beacon_str = ""
for k,v in self.subsystems.items():
beacon_str += str(v) + "\n"
return beacon_str
|
normal
|
{
"blob_id": "505689803c8f4490619ab1a7579fde1e2c18c538",
"index": 5532,
"step-1": "<mask token>\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n <mask token>\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-2": "<mask token>\n\n\nclass ADCS1(object):\n <mask token>\n <mask token>\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-3": "<mask token>\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\n\nclass EPS(object):\n\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n (self.boot_count, self.uptime, self.rt_clock, self.ping_status,\n self.subsystem_status, self.battery_voltage, self.cell_diff,\n self.battery_current, self.solar_power, self.temp, self.pa_temp,\n self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data)\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\n \"\"\"EPS:\n Boot count:\t\t{0}\n Up time:\t\t{1} seconds\n Real time clock:\t{2}\n Battery voltage:\t{3} mV\n Cell difference:\t{4:.1f} mV\n Battery current:\t{5} mA\n Solar power:\t\t{6}\n Temperature:\t\t{7} C\n PA temperature:\t\t{8} C\"\"\"\n .format(self.boot_count, self.uptime, datetime.fromtimestamp(\n self.rt_clock), self.battery_voltage, self.cell_diff, self.\n battery_current, self.solar_power, self.temp, self.pa_temp))\n return eps_str\n\n\nclass COM(object):\n\n def __init__(self, com_data):\n (self.boot_count, self.packets_received, self.packets_send, self.\n latest_rssi, self.latest_bit_correction, self.\n latest_byte_correction) = struct.unpack('>HHHhBB', com_data)\n self.boot_count &= 8191\n\n def __str__(self):\n com_str = (\n \"\"\"COM:\n Boot count:\t\t{0}\n Packets received:\t{1}\n Packets send:\t\t{2}\n Latest rssi:\t\t{3}\n Latest bit corrections:\t{4}\n Latest byte corrections:{5}\"\"\"\n .format(self.boot_count, self.packets_received, self.\n packets_send, self.latest_rssi, self.latest_bit_correction,\n self.latest_byte_correction))\n return com_str\n\n\nclass ADCS1(object):\n\n def __init__(self, adcs1_data):\n data = struct.unpack('>hhhB', adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = \"\"\"ADCS1:\n State:\t{}\n Bdot:\t{}\"\"\".format(self\n .state, self.bdot)\n return adcs1_str\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v 
in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-4": "from datetime import datetime\nimport struct\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\n\nclass EPS(object):\n\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n (self.boot_count, self.uptime, self.rt_clock, self.ping_status,\n self.subsystem_status, self.battery_voltage, self.cell_diff,\n self.battery_current, self.solar_power, self.temp, self.pa_temp,\n self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data)\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\n \"\"\"EPS:\n Boot count:\t\t{0}\n Up time:\t\t{1} seconds\n Real time clock:\t{2}\n Battery voltage:\t{3} mV\n Cell difference:\t{4:.1f} mV\n Battery current:\t{5} mA\n Solar power:\t\t{6}\n Temperature:\t\t{7} C\n PA temperature:\t\t{8} C\"\"\"\n .format(self.boot_count, self.uptime, datetime.fromtimestamp(\n self.rt_clock), self.battery_voltage, self.cell_diff, self.\n battery_current, self.solar_power, self.temp, self.pa_temp))\n return eps_str\n\n\nclass COM(object):\n\n def __init__(self, com_data):\n (self.boot_count, self.packets_received, self.packets_send, self.\n latest_rssi, self.latest_bit_correction, self.\n latest_byte_correction) = struct.unpack('>HHHhBB', com_data)\n self.boot_count &= 8191\n\n def __str__(self):\n com_str = (\n \"\"\"COM:\n Boot count:\t\t{0}\n Packets received:\t{1}\n Packets send:\t\t{2}\n Latest rssi:\t\t{3}\n Latest bit corrections:\t{4}\n Latest byte corrections:{5}\"\"\"\n .format(self.boot_count, self.packets_received, self.\n packets_send, self.latest_rssi, self.latest_bit_correction,\n self.latest_byte_correction))\n return com_str\n\n\nclass ADCS1(object):\n\n def __init__(self, adcs1_data):\n data = struct.unpack('>hhhB', adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = \"\"\"ADCS1:\n State:\t{}\n Bdot:\t{}\"\"\".format(self\n .state, self.bdot)\n return adcs1_str\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def 
__str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-5": "from datetime import datetime\nimport struct\n\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\n\n# reverse engineered\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\nclass EPS(object):\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n\n self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status,\\\n self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\\\n self.temp, self.pa_temp, self.main_voltage = struct.unpack(\">HIIBHBbbBbbb\", eps_data)\n\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\"\"\"EPS:\n Boot count:\\t\\t{0}\n Up time:\\t\\t{1} seconds\n Real time clock:\\t{2}\n Battery voltage:\\t{3} mV\n Cell difference:\\t{4:.1f} mV\n Battery current:\\t{5} mA\n Solar power:\\t\\t{6}\n Temperature:\\t\\t{7} C\n PA temperature:\\t\\t{8} C\"\"\".format(\n self.boot_count, self.uptime, datetime.fromtimestamp(self.rt_clock),\n self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\n self.temp, self.pa_temp))\n\n return eps_str\n\n\nclass COM(object):\n def __init__(self, com_data):\n self.boot_count, self.packets_received, self.packets_send, self.latest_rssi,\\\n self.latest_bit_correction, self.latest_byte_correction = \\\n struct.unpack(\">HHHhBB\", com_data)\n\n self.boot_count &= 0x1fff\n \n def __str__(self):\n com_str = (\"\"\"COM:\n Boot count:\\t\\t{0}\n Packets received:\\t{1}\n Packets send:\\t\\t{2}\n Latest rssi:\\t\\t{3}\n Latest bit corrections:\\t{4}\n Latest byte corrections:{5}\"\"\".format(\n self.boot_count, self.packets_received, self.packets_send,\n self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction))\n\n return com_str\n\n# Reverse engineered classes\nclass ADCS1(object):\n def __init__(self, adcs1_data):\n data = struct.unpack(\">hhhB\", adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = (\"\"\"ADCS1:\n State:\\t{}\n Bdot:\\t{}\"\"\".format(self.state, self.bdot))\n\n return adcs1_str\n\nclass ADCS2(object):\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack(\">hhh\", adcs2_data))\n\n def __str__(self):\n adcs2_str = (\"\"\"ADCS2:\n Gyro:\\t{}\"\"\".format(self.gyro))\n\n return adcs2_str\n\nclass AIS(object):\n def __init__(self, ais_data):\n # there are some fields which apparently are 0 all the time\n # this fields can't be identified by reverse engineering\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack(\">HhhH12s\", ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\\t{}\n Unique MSSI:\\t{}\"\"\".format(self.boot_count, self.unique_mssi))\n\n return ais_str\n\n## Beacon\n# The beacon class takes a string of bytes as input, and parses it to generate\n# a representation of the beacon format used by AASUAT4\n# The beacon format is as follows:\n\n\n# [ 1 byte | 19 bytes | 12 bytes | 7 bytes | 6 bytes | 20 bytes | 20 bytes ]\n# [ Valid | EPS | COM | ADCS1 | ADCS2 | AIS1 | AIS2 ]\n# This is not correct EPS is 20 bytes and COM is 10 bytes\n# The remaining fields seem to have the correct length\n\n#\n# For each subsystem, which are valid, are the corresponding data bytes passed to another\n# class which parses the information.\n#\n# The __str__ method returns a human readable string with key information from the beacon\nclass Beacon(object):\n \n def __init__(self, raw_data):\n if 
len(raw_data) != BEACON_LENGTH:\n raise ValueError(\"Malformed beacon (incorrect length)\")\n\n self.subsystems = {}\n\n valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw = \\\n struct.unpack((\"B\"+\"{}s\"*6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)\n\n # reverse engineered valid bits\n # EPS and COM are known from university team code\n # valid byte is usually 0x27\n # in DK3WN's blog we see that EPS, COM, AIS2 and ADCS1 are valid\n eps_valid = valid & (1 << 0)\n com_valid = valid & (1 << 1)\n adcs1_valid = valid & (1 << 2)\n adcs2_valid = valid & (1 << 3)\n ais1_valid = valid & (1 << 4)\n ais2_valid = valid & (1 << 5)\n \n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n \n def __str__(self):\n beacon_str = \"\"\n for k,v in self.subsystems.items():\n beacon_str += str(v) + \"\\n\"\n return beacon_str\n\n",
"step-ids": [
8,
10,
19,
20,
21
]
}
|
[
8,
10,
19,
20,
21
] |
"""SamsungTV Encrypted."""
import aiohttp
from aioresponses import aioresponses
import pytest
from yarl import URL
from samsungtvws.encrypted.authenticator import SamsungTVEncryptedWSAsyncAuthenticator
@pytest.mark.asyncio
async def test_authenticator(aioresponse: aioresponses) -> None:
with open("tests/fixtures/auth_pin_status.xml") as file:
aioresponse.get("http://1.2.3.4:8080/ws/apps/CloudPINPage", body=file.read())
with open("tests/fixtures/auth_pin_status.xml") as file:
aioresponse.post(
"http://1.2.3.4:8080/ws/apps/CloudPINPage",
body="http:///ws/apps/CloudPINPage/run",
)
with open("tests/fixtures/auth_empty.json") as file:
aioresponse.get(
"http://1.2.3.4:8080/ws/pairing?step=0&app_id=12345"
"&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&type=1",
body=file.read(),
)
with open("tests/fixtures/auth_generator_client_hello.json") as file:
aioresponse.post(
"http://1.2.3.4:8080/ws/pairing?step=1&app_id=12345"
"&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184",
body=file.read(),
)
with open("tests/fixtures/auth_client_ack_msg.json") as file:
aioresponse.post(
"http://1.2.3.4:8080/ws/pairing?step=2&app_id=12345"
"&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184",
body=file.read(),
)
aioresponse.delete("http://1.2.3.4:8080/ws/apps/CloudPINPage/run", body="")
authenticator = SamsungTVEncryptedWSAsyncAuthenticator(
"1.2.3.4", web_session=aiohttp.ClientSession()
)
await authenticator.start_pairing()
token = await authenticator.try_pin("0997")
assert token == "545a596ab96b289c60896255e8690288"
session_id = await authenticator.get_session_id_and_close()
assert session_id == "1"
assert len(aioresponse.requests) == 6
print(aioresponse.requests)
request = aioresponse.requests[
(
"POST",
URL(
"http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=1"
),
)
]
assert (
request[0].kwargs["data"]
== '{"auth_Data":{"auth_type":"SPC","GeneratorServerHello":'
'"010200000000000000008A000000063635343332317CAF9CBDC06B666D23EBC'
"A615E0666FEB2B807091BF507404DDD18329CD64A91E513DC704298CCE49C4C5"
"656C42141A696354A7145127BCD94CDD2B0D632D87E332437F86EBE5A50A1512"
"F3F54C71B791A88ECBAF562FBABE2731F27D851A764CA114DBE2C2C965DF151C"
'FC7401920FAA04636B356B97DBE1DA3A090004F81830000000000"}}'
)
request = aioresponse.requests[
(
"POST",
URL(
"http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=2"
),
)
]
assert (
request[0].kwargs["data"]
== '{"auth_Data":{"auth_type":"SPC","request_id":"0","ServerAckMsg":'
'"01030000000000000000145F38EAFF0F6A6FF062CA652CD6CBAD9AF1EC62470000000000"}}'
)
|
normal
|
{
"blob_id": "e1448e62020f87e315d219be97d9af84607441df",
"index": 9104,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\nasync def test_authenticator(aioresponse: aioresponses) ->None:\n with open('tests/fixtures/auth_pin_status.xml') as file:\n aioresponse.get('http://1.2.3.4:8080/ws/apps/CloudPINPage', body=\n file.read())\n with open('tests/fixtures/auth_pin_status.xml') as file:\n aioresponse.post('http://1.2.3.4:8080/ws/apps/CloudPINPage', body=\n 'http:///ws/apps/CloudPINPage/run')\n with open('tests/fixtures/auth_empty.json') as file:\n aioresponse.get(\n 'http://1.2.3.4:8080/ws/pairing?step=0&app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&type=1'\n , body=file.read())\n with open('tests/fixtures/auth_generator_client_hello.json') as file:\n aioresponse.post(\n 'http://1.2.3.4:8080/ws/pairing?step=1&app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184'\n , body=file.read())\n with open('tests/fixtures/auth_client_ack_msg.json') as file:\n aioresponse.post(\n 'http://1.2.3.4:8080/ws/pairing?step=2&app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184'\n , body=file.read())\n aioresponse.delete('http://1.2.3.4:8080/ws/apps/CloudPINPage/run', body='')\n authenticator = SamsungTVEncryptedWSAsyncAuthenticator('1.2.3.4',\n web_session=aiohttp.ClientSession())\n await authenticator.start_pairing()\n token = await authenticator.try_pin('0997')\n assert token == '545a596ab96b289c60896255e8690288'\n session_id = await authenticator.get_session_id_and_close()\n assert session_id == '1'\n assert len(aioresponse.requests) == 6\n print(aioresponse.requests)\n request = aioresponse.requests['POST', URL(\n 'http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=1'\n )]\n assert request[0].kwargs['data'\n ] == '{\"auth_Data\":{\"auth_type\":\"SPC\",\"GeneratorServerHello\":\"010200000000000000008A000000063635343332317CAF9CBDC06B666D23EBCA615E0666FEB2B807091BF507404DDD18329CD64A91E513DC704298CCE49C4C5656C42141A696354A7145127BCD94CDD2B0D632D87E332437F86EBE5A50A1512F3F54C71B791A88ECBAF562FBABE2731F27D851A764CA114DBE2C2C965DF151CFC7401920FAA04636B356B97DBE1DA3A090004F81830000000000\"}}'\n request = aioresponse.requests['POST', URL(\n 'http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=2'\n )]\n assert request[0].kwargs['data'\n ] == '{\"auth_Data\":{\"auth_type\":\"SPC\",\"request_id\":\"0\",\"ServerAckMsg\":\"01030000000000000000145F38EAFF0F6A6FF062CA652CD6CBAD9AF1EC62470000000000\"}}'\n",
"step-3": "<mask token>\nimport aiohttp\nfrom aioresponses import aioresponses\nimport pytest\nfrom yarl import URL\nfrom samsungtvws.encrypted.authenticator import SamsungTVEncryptedWSAsyncAuthenticator\n\n\[email protected]\nasync def test_authenticator(aioresponse: aioresponses) ->None:\n with open('tests/fixtures/auth_pin_status.xml') as file:\n aioresponse.get('http://1.2.3.4:8080/ws/apps/CloudPINPage', body=\n file.read())\n with open('tests/fixtures/auth_pin_status.xml') as file:\n aioresponse.post('http://1.2.3.4:8080/ws/apps/CloudPINPage', body=\n 'http:///ws/apps/CloudPINPage/run')\n with open('tests/fixtures/auth_empty.json') as file:\n aioresponse.get(\n 'http://1.2.3.4:8080/ws/pairing?step=0&app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&type=1'\n , body=file.read())\n with open('tests/fixtures/auth_generator_client_hello.json') as file:\n aioresponse.post(\n 'http://1.2.3.4:8080/ws/pairing?step=1&app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184'\n , body=file.read())\n with open('tests/fixtures/auth_client_ack_msg.json') as file:\n aioresponse.post(\n 'http://1.2.3.4:8080/ws/pairing?step=2&app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184'\n , body=file.read())\n aioresponse.delete('http://1.2.3.4:8080/ws/apps/CloudPINPage/run', body='')\n authenticator = SamsungTVEncryptedWSAsyncAuthenticator('1.2.3.4',\n web_session=aiohttp.ClientSession())\n await authenticator.start_pairing()\n token = await authenticator.try_pin('0997')\n assert token == '545a596ab96b289c60896255e8690288'\n session_id = await authenticator.get_session_id_and_close()\n assert session_id == '1'\n assert len(aioresponse.requests) == 6\n print(aioresponse.requests)\n request = aioresponse.requests['POST', URL(\n 'http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=1'\n )]\n assert request[0].kwargs['data'\n ] == '{\"auth_Data\":{\"auth_type\":\"SPC\",\"GeneratorServerHello\":\"010200000000000000008A000000063635343332317CAF9CBDC06B666D23EBCA615E0666FEB2B807091BF507404DDD18329CD64A91E513DC704298CCE49C4C5656C42141A696354A7145127BCD94CDD2B0D632D87E332437F86EBE5A50A1512F3F54C71B791A88ECBAF562FBABE2731F27D851A764CA114DBE2C2C965DF151CFC7401920FAA04636B356B97DBE1DA3A090004F81830000000000\"}}'\n request = aioresponse.requests['POST', URL(\n 'http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=2'\n )]\n assert request[0].kwargs['data'\n ] == '{\"auth_Data\":{\"auth_type\":\"SPC\",\"request_id\":\"0\",\"ServerAckMsg\":\"01030000000000000000145F38EAFF0F6A6FF062CA652CD6CBAD9AF1EC62470000000000\"}}'\n",
"step-4": "\"\"\"SamsungTV Encrypted.\"\"\"\nimport aiohttp\nfrom aioresponses import aioresponses\nimport pytest\nfrom yarl import URL\n\nfrom samsungtvws.encrypted.authenticator import SamsungTVEncryptedWSAsyncAuthenticator\n\n\[email protected]\nasync def test_authenticator(aioresponse: aioresponses) -> None:\n with open(\"tests/fixtures/auth_pin_status.xml\") as file:\n aioresponse.get(\"http://1.2.3.4:8080/ws/apps/CloudPINPage\", body=file.read())\n with open(\"tests/fixtures/auth_pin_status.xml\") as file:\n aioresponse.post(\n \"http://1.2.3.4:8080/ws/apps/CloudPINPage\",\n body=\"http:///ws/apps/CloudPINPage/run\",\n )\n with open(\"tests/fixtures/auth_empty.json\") as file:\n aioresponse.get(\n \"http://1.2.3.4:8080/ws/pairing?step=0&app_id=12345\"\n \"&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&type=1\",\n body=file.read(),\n )\n with open(\"tests/fixtures/auth_generator_client_hello.json\") as file:\n aioresponse.post(\n \"http://1.2.3.4:8080/ws/pairing?step=1&app_id=12345\"\n \"&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184\",\n body=file.read(),\n )\n with open(\"tests/fixtures/auth_client_ack_msg.json\") as file:\n aioresponse.post(\n \"http://1.2.3.4:8080/ws/pairing?step=2&app_id=12345\"\n \"&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184\",\n body=file.read(),\n )\n aioresponse.delete(\"http://1.2.3.4:8080/ws/apps/CloudPINPage/run\", body=\"\")\n\n authenticator = SamsungTVEncryptedWSAsyncAuthenticator(\n \"1.2.3.4\", web_session=aiohttp.ClientSession()\n )\n await authenticator.start_pairing()\n token = await authenticator.try_pin(\"0997\")\n assert token == \"545a596ab96b289c60896255e8690288\"\n\n session_id = await authenticator.get_session_id_and_close()\n assert session_id == \"1\"\n\n assert len(aioresponse.requests) == 6\n print(aioresponse.requests)\n\n request = aioresponse.requests[\n (\n \"POST\",\n URL(\n \"http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=1\"\n ),\n )\n ]\n assert (\n request[0].kwargs[\"data\"]\n == '{\"auth_Data\":{\"auth_type\":\"SPC\",\"GeneratorServerHello\":'\n '\"010200000000000000008A000000063635343332317CAF9CBDC06B666D23EBC'\n \"A615E0666FEB2B807091BF507404DDD18329CD64A91E513DC704298CCE49C4C5\"\n \"656C42141A696354A7145127BCD94CDD2B0D632D87E332437F86EBE5A50A1512\"\n \"F3F54C71B791A88ECBAF562FBABE2731F27D851A764CA114DBE2C2C965DF151C\"\n 'FC7401920FAA04636B356B97DBE1DA3A090004F81830000000000\"}}'\n )\n request = aioresponse.requests[\n (\n \"POST\",\n URL(\n \"http://1.2.3.4:8080/ws/pairing?app_id=12345&device_id=7e509404-9d7c-46b4-8f6a-e2a9668ad184&step=2\"\n ),\n )\n ]\n assert (\n request[0].kwargs[\"data\"]\n == '{\"auth_Data\":{\"auth_type\":\"SPC\",\"request_id\":\"0\",\"ServerAckMsg\":'\n '\"01030000000000000000145F38EAFF0F6A6FF062CA652CD6CBAD9AF1EC62470000000000\"}}'\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding:UTF-8 -*-
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
import os
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
import jieba_cut
import random
import cPickle
import re
outputfile = "dzsptfidf"
X_train,y_train = cPickle.load(open(os.path.join(outputfile,"train.data"),"rb"))
X_test,y_test = cPickle.load(open(os.path.join(outputfile,"test.data"),"rb"))
vectorizer = cPickle.load(open(os.path.join(outputfile,"vectorizer.data"),"rb"))
chi2 = cPickle.load(open(os.path.join(outputfile,"ch2.data"),"rb"))
clf = cPickle.load(open(os.path.join(outputfile,"SGD_l2.model"),"rb"))
#inputpath =u"E:\\项目需求\\JDPower\\分类\\4月份\\financeoutput1_final.txt"
#outputpath =u"E:\\项目需求\\JDPower\\分类\\4月份\\大宗商品.txt"
inputpath =u"E:\\项目需求\\JDPower\\分类\\5月份\\financeoutput1_final_05.txt"
outputpath =u"E:\\项目需求\\JDPower\\分类\\5月份\\大宗商品.txt"
label = "大宗商品"
forbidkword = {}
# load
forbidpath = u"..//keyword.txt"
with open(forbidpath, "rb") as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
outfile = open(outputpath,"wb")
with open(inputpath, "rb") as f:
for line in f:
splits = line.strip().split("\t")
tag = splits[0]
if tag.find(label) > -1 :
print(tag)
train = []
#print (splits[-1])
seg = jieba_cut.cut(splits[-1], cut_all=False)
#seglist = [i for i in seg]
seglist = []
for w in seg:
#print w
w = w.strip().encode("utf-8")
if w not in forbidkword:
if not re.match(r"\d+$", w):
seglist.append(w)
train.append(" ".join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
#print(" ".join(pred))
print (pred)
lb = str(pred[0])
#print(isinstance(lb, unicode))
#print( lb.decode("gbk").encode("utf-8"))
#outfile.writelines(lb+"\n")
if lb == '1' :
outfile.writelines(line.strip()+"\t")
outfile.writelines(lb+"\n")
#outfile.writelines(line.strip()+"\t"+lb.decode("utf-8").encode("utf-8")+"\n")
outfile.close()
|
normal
|
{
"blob_id": "84a516e924252d897be7444e11acfecd66474090",
"index": 1177,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(forbidpath, 'rb') as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\n<mask token>\nwith open(inputpath, 'rb') as f:\n for line in f:\n splits = line.strip().split('\\t')\n tag = splits[0]\n if tag.find(label) > -1:\n print(tag)\n train = []\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n seglist = []\n for w in seg:\n w = w.strip().encode('utf-8')\n if w not in forbidkword:\n if not re.match('\\\\d+$', w):\n seglist.append(w)\n train.append(' '.join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n print(pred)\n lb = str(pred[0])\n if lb == '1':\n outfile.writelines(line.strip() + '\\t')\n outfile.writelines(lb + '\\n')\noutfile.close()\n",
"step-3": "<mask token>\noutputfile = 'dzsptfidf'\nX_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),\n 'rb'))\nX_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')\n )\nvectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),\n 'rb'))\nchi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))\nclf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))\ninputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\financeoutput1_final_05.txt'\noutputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\大宗商品.txt'\nlabel = '大宗商品'\nforbidkword = {}\nforbidpath = u'..//keyword.txt'\nwith open(forbidpath, 'rb') as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\noutfile = open(outputpath, 'wb')\nwith open(inputpath, 'rb') as f:\n for line in f:\n splits = line.strip().split('\\t')\n tag = splits[0]\n if tag.find(label) > -1:\n print(tag)\n train = []\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n seglist = []\n for w in seg:\n w = w.strip().encode('utf-8')\n if w not in forbidkword:\n if not re.match('\\\\d+$', w):\n seglist.append(w)\n train.append(' '.join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n print(pred)\n lb = str(pred[0])\n if lb == '1':\n outfile.writelines(line.strip() + '\\t')\n outfile.writelines(lb + '\\n')\noutfile.close()\n",
"step-4": "from __future__ import print_function\nimport logging\nimport numpy as np\nfrom optparse import OptionParser\nimport sys\nfrom time import time\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils.extmath import density\nfrom sklearn import metrics\nimport jieba_cut\nimport random\nimport cPickle\nimport re\noutputfile = 'dzsptfidf'\nX_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),\n 'rb'))\nX_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')\n )\nvectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),\n 'rb'))\nchi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))\nclf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))\ninputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\financeoutput1_final_05.txt'\noutputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\大宗商品.txt'\nlabel = '大宗商品'\nforbidkword = {}\nforbidpath = u'..//keyword.txt'\nwith open(forbidpath, 'rb') as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\noutfile = open(outputpath, 'wb')\nwith open(inputpath, 'rb') as f:\n for line in f:\n splits = line.strip().split('\\t')\n tag = splits[0]\n if tag.find(label) > -1:\n print(tag)\n train = []\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n seglist = []\n for w in seg:\n w = w.strip().encode('utf-8')\n if w not in forbidkword:\n if not re.match('\\\\d+$', w):\n seglist.append(w)\n train.append(' '.join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n print(pred)\n lb = str(pred[0])\n if lb == '1':\n outfile.writelines(line.strip() + '\\t')\n outfile.writelines(lb + '\\n')\noutfile.close()\n",
"step-5": "# -*- coding:UTF-8 -*-\nfrom __future__ import print_function\nimport logging\nimport numpy as np\nfrom optparse import OptionParser\nimport sys\nfrom time import time\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils.extmath import density\nfrom sklearn import metrics\nimport jieba_cut\nimport random\nimport cPickle\nimport re\noutputfile = \"dzsptfidf\"\nX_train,y_train = cPickle.load(open(os.path.join(outputfile,\"train.data\"),\"rb\"))\nX_test,y_test = cPickle.load(open(os.path.join(outputfile,\"test.data\"),\"rb\"))\nvectorizer = cPickle.load(open(os.path.join(outputfile,\"vectorizer.data\"),\"rb\"))\nchi2 = cPickle.load(open(os.path.join(outputfile,\"ch2.data\"),\"rb\"))\nclf = cPickle.load(open(os.path.join(outputfile,\"SGD_l2.model\"),\"rb\"))\n#inputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\4月份\\\\financeoutput1_final.txt\"\n#outputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\4月份\\\\大宗商品.txt\"\n\ninputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\financeoutput1_final_05.txt\"\noutputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\大宗商品.txt\"\n\nlabel = \"大宗商品\"\n\nforbidkword = {}\n# load\n\nforbidpath = u\"..//keyword.txt\"\nwith open(forbidpath, \"rb\") as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\n\noutfile = open(outputpath,\"wb\")\nwith open(inputpath, \"rb\") as f:\n for line in f:\n splits = line.strip().split(\"\\t\")\n tag = splits[0]\n\n if tag.find(label) > -1 :\n print(tag)\n train = []\n #print (splits[-1])\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n #seglist = [i for i in seg]\n seglist = []\n for w in seg:\n #print w\n w = w.strip().encode(\"utf-8\")\n if w not in forbidkword:\n if not re.match(r\"\\d+$\", w):\n seglist.append(w)\n train.append(\" \".join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n #print(\" \".join(pred))\n print (pred)\n lb = str(pred[0])\n #print(isinstance(lb, unicode))\n #print( lb.decode(\"gbk\").encode(\"utf-8\"))\n #outfile.writelines(lb+\"\\n\")\n if lb == '1' :\n outfile.writelines(line.strip()+\"\\t\")\n outfile.writelines(lb+\"\\n\")\n #outfile.writelines(line.strip()+\"\\t\"+lb.decode(\"utf-8\").encode(\"utf-8\")+\"\\n\")\noutfile.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
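# Keyword-overlap question answering: read a passage, five questions and a semicolon-separated
# list of candidate answers from stdin, then print the best-matching answer or sentence per question.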
er = ['why','who','how','where','which','what','when','was','were','did','do','does','is','are','many','much']
qst = []
txt = None
ans = None
fnd = []
def chek_qst(qst):
global er
for h in er:
for i in qst:
if i == h:
qst.remove(i)
# qst = np.delete(qst, ([i for i, j in enumerate(qst) if h in j]))
return qst
def search_word(qst):
global txt
for h in qst:
temp = []
for n,l in enumerate(txt):
if [n for i,j in enumerate(l) if h in j] != []:
temp.append(n)
# temp = np.array(temp)
if temp != []:
fnd.append(temp)
def read():
global txt
global qst
global ans
txt = np.array((input().lower()).split('.'))
txt = txt.reshape(len(txt), 1)
for i in range(5):
qst.append((input().lower()).replace('?','').split())
split_quest()
qst = np.array(qst)
ans = np.array((input().lower()).split(';'))
ans = ans.reshape(len(ans), 1)
def split_quest():
for i in range(len(qst)):
qst[i] = chek_qst(qst[i])
def find_answer(fnd):
flag = False
answer = None
global ans
temp_min = []
for i in fnd:
if len(i) == 1:
answer = i[0]
# print(str(txt[answer][0]))
for i in ans:
for j in i:
if j in txt[answer][0]:
# print('from first :: ',j,'\n',answer)
print(j)
flag = True
if flag:
break
if flag:
break
if not flag:
for i in fnd:
temp_min.append(len(i))
temp_min = np.array(temp_min)
temp_min = temp_min.argmin()
# print(temp)
p = []
for i in fnd[temp_min]:
count = 0
for j,h in enumerate(fnd):
if fnd[temp_min] != h:
if i in h:
count +=1
p.append(count)
p = np.array(p)
# print('from second :: ',str(txt[fnd[temp_min][p.argmax()]][0]))
print(str(txt[fnd[temp_min][p.argmax()]][0]))
# for i in ans:
# for j in i:
# if j in txt[fnd[temp_min][p.argmax()]][0]:
# print(j)
# # break
# break
read()
for i,qst_num in enumerate(qst):
fnd = []
search_word(qst_num)
# print('\n',fnd)
find_answer(fnd)
# fnd = np.array(fnd).reshape(len(fnd))
# print('questin #{}'.format(i+1),fnd,'\n')
# print(str(txt[find_answer(fnd)][0]))
# print(ans)
# print('\n',qst)
# print('\n\n',[(i,j[0]) for i,j in enumerate(txt)])
# print('\n\n',[(i,j[0]) for i,j in enumerate(ans)])
'''Zebras are several species of African equids (horse family) united by their distinctive black and white stripes. Their stripes come in different patterns, unique to each individual. They are generally social animals that live in small harems to large herds. Unlike their closest relatives, horses and donkeys, zebras have never been truly domesticated. There are three species of zebras: the plains zebra, the Grévy's zebra and the mountain zebra. The plains zebra and the mountain zebra belong to the subgenus Hippotigris, but Grévy's zebra is the sole species of subgenus Dolichohippus. The latter resembles an ass, to which it is closely related, while the former two are more horse-like. All three belong to the genus Equus, along with other living equids. The unique stripes of zebras make them one of the animals most familiar to people. They occur in a variety of habitats, such as grasslands, savannas, woodlands, thorny scrublands, mountains, and coastal hills. However, various anthropogenic factors have had a severe impact on zebra populations, in particular hunting for skins and habitat destruction. Grévy's zebra and the mountain zebra are endangered. While plains zebras are much more plentiful, one subspecies, the quagga, became extinct in the late 19th century – though there is currently a plan, called the Quagga Project, that aims to breed zebras that are phenotypically similar to the quagga in a process called breeding back.
Which Zebras are endangered?
What is the aim of the Quagga Project?
Which animals are some of their closest relatives?
Which are the three species of zebras?
Which subgenus do the plains zebra and the mountain zebra belong to?
subgenus Hippotigris;the plains zebra, the Grévy's zebra and the mountain zebra;horses and donkeys;aims to breed zebras that are phenotypically similar to the quagga;Grévy's zebra and the mountain zebra
'''
|
normal
|
{
"blob_id": "d30129248f5245560ee0d3ee786e118427e169d7",
"index": 4616,
"step-1": "<mask token>\n\n\ndef search_word(qst):\n global txt\n for h in qst:\n temp = []\n for n, l in enumerate(txt):\n if [n for i, j in enumerate(l) if h in j] != []:\n temp.append(n)\n if temp != []:\n fnd.append(temp)\n\n\n<mask token>\n\n\ndef split_quest():\n for i in range(len(qst)):\n qst[i] = chek_qst(qst[i])\n\n\ndef find_answer(fnd):\n flag = False\n answer = None\n global ans\n temp_min = []\n for i in fnd:\n if len(i) == 1:\n answer = i[0]\n for i in ans:\n for j in i:\n if j in txt[answer][0]:\n print(j)\n flag = True\n if flag:\n break\n if flag:\n break\n if not flag:\n for i in fnd:\n temp_min.append(len(i))\n temp_min = np.array(temp_min)\n temp_min = temp_min.argmin()\n p = []\n for i in fnd[temp_min]:\n count = 0\n for j, h in enumerate(fnd):\n if fnd[temp_min] != h:\n if i in h:\n count += 1\n p.append(count)\n p = np.array(p)\n print(str(txt[fnd[temp_min][p.argmax()]][0]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef chek_qst(qst):\n global er\n for h in er:\n for i in qst:\n if i == h:\n qst.remove(i)\n return qst\n\n\ndef search_word(qst):\n global txt\n for h in qst:\n temp = []\n for n, l in enumerate(txt):\n if [n for i, j in enumerate(l) if h in j] != []:\n temp.append(n)\n if temp != []:\n fnd.append(temp)\n\n\ndef read():\n global txt\n global qst\n global ans\n txt = np.array(input().lower().split('.'))\n txt = txt.reshape(len(txt), 1)\n for i in range(5):\n qst.append(input().lower().replace('?', '').split())\n split_quest()\n qst = np.array(qst)\n ans = np.array(input().lower().split(';'))\n ans = ans.reshape(len(ans), 1)\n\n\ndef split_quest():\n for i in range(len(qst)):\n qst[i] = chek_qst(qst[i])\n\n\ndef find_answer(fnd):\n flag = False\n answer = None\n global ans\n temp_min = []\n for i in fnd:\n if len(i) == 1:\n answer = i[0]\n for i in ans:\n for j in i:\n if j in txt[answer][0]:\n print(j)\n flag = True\n if flag:\n break\n if flag:\n break\n if not flag:\n for i in fnd:\n temp_min.append(len(i))\n temp_min = np.array(temp_min)\n temp_min = temp_min.argmin()\n p = []\n for i in fnd[temp_min]:\n count = 0\n for j, h in enumerate(fnd):\n if fnd[temp_min] != h:\n if i in h:\n count += 1\n p.append(count)\n p = np.array(p)\n print(str(txt[fnd[temp_min][p.argmax()]][0]))\n\n\nread()\nfor i, qst_num in enumerate(qst):\n fnd = []\n search_word(qst_num)\n find_answer(fnd)\n<mask token>\n",
"step-3": "<mask token>\ner = ['why', 'who', 'how', 'where', 'which', 'what', 'when', 'was', 'were',\n 'did', 'do', 'does', 'is', 'are', 'many', 'much']\nqst = []\ntxt = None\nans = None\nfnd = []\n\n\ndef chek_qst(qst):\n global er\n for h in er:\n for i in qst:\n if i == h:\n qst.remove(i)\n return qst\n\n\ndef search_word(qst):\n global txt\n for h in qst:\n temp = []\n for n, l in enumerate(txt):\n if [n for i, j in enumerate(l) if h in j] != []:\n temp.append(n)\n if temp != []:\n fnd.append(temp)\n\n\ndef read():\n global txt\n global qst\n global ans\n txt = np.array(input().lower().split('.'))\n txt = txt.reshape(len(txt), 1)\n for i in range(5):\n qst.append(input().lower().replace('?', '').split())\n split_quest()\n qst = np.array(qst)\n ans = np.array(input().lower().split(';'))\n ans = ans.reshape(len(ans), 1)\n\n\ndef split_quest():\n for i in range(len(qst)):\n qst[i] = chek_qst(qst[i])\n\n\ndef find_answer(fnd):\n flag = False\n answer = None\n global ans\n temp_min = []\n for i in fnd:\n if len(i) == 1:\n answer = i[0]\n for i in ans:\n for j in i:\n if j in txt[answer][0]:\n print(j)\n flag = True\n if flag:\n break\n if flag:\n break\n if not flag:\n for i in fnd:\n temp_min.append(len(i))\n temp_min = np.array(temp_min)\n temp_min = temp_min.argmin()\n p = []\n for i in fnd[temp_min]:\n count = 0\n for j, h in enumerate(fnd):\n if fnd[temp_min] != h:\n if i in h:\n count += 1\n p.append(count)\n p = np.array(p)\n print(str(txt[fnd[temp_min][p.argmax()]][0]))\n\n\nread()\nfor i, qst_num in enumerate(qst):\n fnd = []\n search_word(qst_num)\n find_answer(fnd)\n<mask token>\n",
"step-4": "import numpy as np\ner = ['why', 'who', 'how', 'where', 'which', 'what', 'when', 'was', 'were',\n 'did', 'do', 'does', 'is', 'are', 'many', 'much']\nqst = []\ntxt = None\nans = None\nfnd = []\n\n\ndef chek_qst(qst):\n global er\n for h in er:\n for i in qst:\n if i == h:\n qst.remove(i)\n return qst\n\n\ndef search_word(qst):\n global txt\n for h in qst:\n temp = []\n for n, l in enumerate(txt):\n if [n for i, j in enumerate(l) if h in j] != []:\n temp.append(n)\n if temp != []:\n fnd.append(temp)\n\n\ndef read():\n global txt\n global qst\n global ans\n txt = np.array(input().lower().split('.'))\n txt = txt.reshape(len(txt), 1)\n for i in range(5):\n qst.append(input().lower().replace('?', '').split())\n split_quest()\n qst = np.array(qst)\n ans = np.array(input().lower().split(';'))\n ans = ans.reshape(len(ans), 1)\n\n\ndef split_quest():\n for i in range(len(qst)):\n qst[i] = chek_qst(qst[i])\n\n\ndef find_answer(fnd):\n flag = False\n answer = None\n global ans\n temp_min = []\n for i in fnd:\n if len(i) == 1:\n answer = i[0]\n for i in ans:\n for j in i:\n if j in txt[answer][0]:\n print(j)\n flag = True\n if flag:\n break\n if flag:\n break\n if not flag:\n for i in fnd:\n temp_min.append(len(i))\n temp_min = np.array(temp_min)\n temp_min = temp_min.argmin()\n p = []\n for i in fnd[temp_min]:\n count = 0\n for j, h in enumerate(fnd):\n if fnd[temp_min] != h:\n if i in h:\n count += 1\n p.append(count)\n p = np.array(p)\n print(str(txt[fnd[temp_min][p.argmax()]][0]))\n\n\nread()\nfor i, qst_num in enumerate(qst):\n fnd = []\n search_word(qst_num)\n find_answer(fnd)\n<mask token>\n",
"step-5": "import numpy as np\n\ner = ['why','who','how','where','which','what','when','was','were','did','do','does','is','are','many','much']\nqst = []\ntxt = None\nans = None\nfnd = []\n\n\ndef chek_qst(qst):\n global er\n for h in er:\n for i in qst:\n if i == h:\n qst.remove(i)\n # qst = np.delete(qst, ([i for i, j in enumerate(qst) if h in j]))\n return qst\n\ndef search_word(qst):\n global txt\n for h in qst:\n temp = []\n for n,l in enumerate(txt):\n if [n for i,j in enumerate(l) if h in j] != []:\n temp.append(n)\n # temp = np.array(temp)\n if temp != []:\n fnd.append(temp)\n\ndef read():\n global txt\n global qst\n global ans\n txt = np.array((input().lower()).split('.'))\n txt = txt.reshape(len(txt), 1)\n for i in range(5):\n qst.append((input().lower()).replace('?','').split())\n\n split_quest()\n qst = np.array(qst)\n ans = np.array((input().lower()).split(';'))\n ans = ans.reshape(len(ans), 1)\n\ndef split_quest():\n for i in range(len(qst)):\n qst[i] = chek_qst(qst[i])\n\ndef find_answer(fnd):\n flag = False\n answer = None\n global ans\n temp_min = []\n for i in fnd:\n if len(i) == 1:\n answer = i[0]\n # print(str(txt[answer][0]))\n for i in ans:\n for j in i:\n if j in txt[answer][0]:\n # print('from first :: ',j,'\\n',answer)\n print(j)\n flag = True\n if flag:\n break\n if flag:\n break\n\n if not flag:\n for i in fnd:\n temp_min.append(len(i))\n temp_min = np.array(temp_min)\n temp_min = temp_min.argmin()\n # print(temp)\n p = []\n for i in fnd[temp_min]:\n count = 0\n for j,h in enumerate(fnd):\n if fnd[temp_min] != h:\n if i in h:\n count +=1\n p.append(count)\n p = np.array(p)\n # print('from second :: ',str(txt[fnd[temp_min][p.argmax()]][0]))\n print(str(txt[fnd[temp_min][p.argmax()]][0]))\n # for i in ans:\n # for j in i:\n # if j in txt[fnd[temp_min][p.argmax()]][0]:\n # print(j)\n # # break\n # break\n\n\n\nread()\nfor i,qst_num in enumerate(qst):\n fnd = []\n search_word(qst_num)\n # print('\\n',fnd)\n find_answer(fnd)\n # fnd = np.array(fnd).reshape(len(fnd))\n # print('questin #{}'.format(i+1),fnd,'\\n')\n # print(str(txt[find_answer(fnd)][0]))\n\n# print(ans)\n# print('\\n',qst)\n# print('\\n\\n',[(i,j[0]) for i,j in enumerate(txt)])\n# print('\\n\\n',[(i,j[0]) for i,j in enumerate(ans)])\n\n\n\n\n\n'''Zebras are several species of African equids (horse family) united by their distinctive black and white stripes. Their stripes come in different patterns, unique to each individual. They are generally social animals that live in small harems to large herds. Unlike their closest relatives, horses and donkeys, zebras have never been truly domesticated. There are three species of zebras: the plains zebra, the Grévy's zebra and the mountain zebra. The plains zebra and the mountain zebra belong to the subgenus Hippotigris, but Grévy's zebra is the sole species of subgenus Dolichohippus. The latter resembles an ass, to which it is closely related, while the former two are more horse-like. All three belong to the genus Equus, along with other living equids. The unique stripes of zebras make them one of the animals most familiar to people. They occur in a variety of habitats, such as grasslands, savannas, woodlands, thorny scrublands, mountains, and coastal hills. However, various anthropogenic factors have had a severe impact on zebra populations, in particular hunting for skins and habitat destruction. Grévy's zebra and the mountain zebra are endangered. 
While plains zebras are much more plentiful, one subspecies, the quagga, became extinct in the late 19th century – though there is currently a plan, called the Quagga Project, that aims to breed zebras that are phenotypically similar to the quagga in a process called breeding back.\nWhich Zebras are endangered?\nWhat is the aim of the Quagga Project?\nWhich animals are some of their closest relatives?\nWhich are the three species of zebras?\nWhich subgenus do the plains zebra and the mountain zebra belong to?\nsubgenus Hippotigris;the plains zebra, the Grévy's zebra and the mountain zebra;horses and donkeys;aims to breed zebras that are phenotypically similar to the quagga;Grévy's zebra and the mountain zebra\n'''",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils import timezone
from timesheets.models import TimeSheet
from channels import Group
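# Project timesheet models: ProjectTS covers one pay period for an ambassador;
# ProjectTSEntry is a single dated block of work whose total_time is derived on save.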
class ProjectTS(models.Model):
class Meta:
permissions = (
("approve_project_ts", "Can approve timesheet"),
)
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(
User, related_name='project_ts_member',
limit_choices_to={'is_staff' : True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default="")
project_time_sheet = models.ForeignKey(ProjectTS, related_name="project_time_sheet")
project_leader = models.ForeignKey(User, related_name="pl",
limit_choices_to={'is_staff' : True, 'groups__name' : 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default="")
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
|
normal
|
{
"blob_id": "df39a97db25f03aca8ebd501283fd6a7c486db8c",
"index": 1243,
"step-1": "<mask token>\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-2": "<mask token>\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-3": "<mask token>\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(User, related_name='project_ts_member',\n limit_choices_to={'is_staff': True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom timesheets.models import TimeSheet\nfrom channels import Group\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(User, related_name='project_ts_member',\n limit_choices_to={'is_staff': True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-5": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom timesheets.models import TimeSheet\nfrom channels import Group\n\n\nclass ProjectTS(models.Model):\n class Meta:\n permissions = (\n (\"approve_project_ts\", \"Can approve timesheet\"),\n )\n\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(\n User, related_name='project_ts_member',\n limit_choices_to={'is_staff' : True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default=\"\")\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\"project_time_sheet\")\n project_leader = models.ForeignKey(User, related_name=\"pl\",\n limit_choices_to={'is_staff' : True, 'groups__name' : 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default=\"\")\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from __future__ import absolute_import, unicode_literals
from django.db import DataError, IntegrityError, connection
import pytest
from .models import Page
pytestmark = pytest.mark.django_db
MYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'
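# Lookup and constraint tests for the regex-validated Page.url field; constraint checks are
# skipped on MySQL because it does not enforce them.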
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\w]*', '[\w]?'))
def test_empty_regex(regex):
with pytest.raises(IntegrityError):
Page.objects.create(url=regex)
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
|
normal
|
{
"blob_id": "96065e7e61b63f915561f117d71092e4bfb9a5da",
"index": 1149,
"step-1": "<mask token>\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-2": "<mask token>\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\n<mask token>\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-3": "<mask token>\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\[email protected]('regex', ('', '.*', '.?', '[\\\\w]*', '[\\\\w]?'))\ndef test_empty_regex(regex):\n with pytest.raises(IntegrityError):\n Page.objects.create(url=regex)\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-4": "<mask token>\npytestmark = pytest.mark.django_db\nMYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\[email protected]('regex', ('', '.*', '.?', '[\\\\w]*', '[\\\\w]?'))\ndef test_empty_regex(regex):\n with pytest.raises(IntegrityError):\n Page.objects.create(url=regex)\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-5": "from __future__ import absolute_import, unicode_literals\n\nfrom django.db import DataError, IntegrityError, connection\n\nimport pytest\n\nfrom .models import Page\n\npytestmark = pytest.mark.django_db\n\nMYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\[email protected]('regex', ('', '.*', '.?', '[\\w]*', '[\\w]?'))\ndef test_empty_regex(regex):\n with pytest.raises(IntegrityError):\n Page.objects.create(url=regex)\n\n\[email protected]('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-ids": [
1,
3,
4,
5,
7
]
}
|
[
1,
3,
4,
5,
7
] |
import requests
import urllib.request
from utilities.read_write_utilities import read_set,write_to_csv
import time
from bs4 import BeautifulSoup
import pickledb
import json
import glob
import csv
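# Flatten the per-drug JSON scrape files listed in `drugs` into a single units-of-use CSV,
# keeping only the fields selected below.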
drugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')
print(drugs)
output_records = []
# fields = ["equiv_name","coupon_network","npi","default_quantity","price_type","scrape_date","price","root","dosage",
# "generic","drug_id","date","form_name","ncpdp","pharmacy","geo","slug","quantity"]
fields = ["equiv_name","default_quantity","root","dosage","generic","drug_id","form_name","slug"]
for drug in drugs:
# print('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug)
with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug) as json_file:
for record in json.load(json_file):
# print(record)
output_records.append({field:str(record[field]) if field in record else '' for field in fields})
write_to_csv('/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',output_records)
# filename = '/Users/sdey/Downloads/privia_utilization_data.csv'
# output_filename = '/Users/sdey/Downloads/privia_utilization_raw_data.csv'
#
# with open(filename, 'r') as input_file:
# with open(output_filename, 'w') as output_file:
# reader = csv.DictReader(input_file)
# writer = csv.DictWriter(output_file, fieldnames=reader.fieldnames)
# writer.writeheader()
# number_of_lines = 0
# for row in reader:
# row['Medication Name'] = row['Medication Name'].replace(',',':')
# writer.writerow(row)
# number_of_lines+=1
# if number_of_lines % 10000 == 0 :
# print('%d lines'%number_of_lines)
#
# filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515.csv'
# output_filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515_output.csv'
#
# with open(filename, 'r') as input_file:
# with open(output_filename, 'w') as output_file:
# reader = csv.DictReader(input_file)
# fieldnames = ['ndc','nadac_per_unit','effective_date','pricing_unit','otc',
# 'explanation_code','classification_for_rate_setting','corresponding_generic_drug_nadac_per_unit',
# 'corresponding_generic_drug_effective_date','as_of_date']
# writer = csv.DictWriter(output_file, fieldnames=fieldnames)
# writer.writeheader()
# number_of_lines = 0
# for row in reader:
# row['explanation_code'] = row['explanation_code'].replace('\"','').replace(',','').replace(' ','')
# row.pop('ndc_description')
# row.pop('pharmacy_type_indicator')
# writer.writerow(row)
# number_of_lines+=1
# if number_of_lines % 10000 == 0 :
# print('%d lines'%number_of_lines)
|
normal
|
{
"blob_id": "e7f511b97f316157a768203afe9f36ea834ebb6c",
"index": 5493,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(drugs)\n<mask token>\nfor drug in drugs:\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug\n ) as json_file:\n for record in json.load(json_file):\n output_records.append({field: (str(record[field]) if field in\n record else '') for field in fields})\nwrite_to_csv(\n '/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',\n output_records)\n",
"step-3": "<mask token>\ndrugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')\nprint(drugs)\noutput_records = []\nfields = ['equiv_name', 'default_quantity', 'root', 'dosage', 'generic',\n 'drug_id', 'form_name', 'slug']\nfor drug in drugs:\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug\n ) as json_file:\n for record in json.load(json_file):\n output_records.append({field: (str(record[field]) if field in\n record else '') for field in fields})\nwrite_to_csv(\n '/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',\n output_records)\n",
"step-4": "import requests\nimport urllib.request\nfrom utilities.read_write_utilities import read_set, write_to_csv\nimport time\nfrom bs4 import BeautifulSoup\nimport pickledb\nimport json\nimport glob\nimport csv\ndrugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')\nprint(drugs)\noutput_records = []\nfields = ['equiv_name', 'default_quantity', 'root', 'dosage', 'generic',\n 'drug_id', 'form_name', 'slug']\nfor drug in drugs:\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug\n ) as json_file:\n for record in json.load(json_file):\n output_records.append({field: (str(record[field]) if field in\n record else '') for field in fields})\nwrite_to_csv(\n '/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',\n output_records)\n",
"step-5": "import requests\nimport urllib.request\nfrom utilities.read_write_utilities import read_set,write_to_csv\nimport time\nfrom bs4 import BeautifulSoup\nimport pickledb\nimport json\nimport glob\nimport csv\n\n\ndrugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')\nprint(drugs)\noutput_records = []\n# fields = [\"equiv_name\",\"coupon_network\",\"npi\",\"default_quantity\",\"price_type\",\"scrape_date\",\"price\",\"root\",\"dosage\",\n# \"generic\",\"drug_id\",\"date\",\"form_name\",\"ncpdp\",\"pharmacy\",\"geo\",\"slug\",\"quantity\"]\n\nfields = [\"equiv_name\",\"default_quantity\",\"root\",\"dosage\",\"generic\",\"drug_id\",\"form_name\",\"slug\"]\n\nfor drug in drugs:\n # print('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug)\n with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s'%drug) as json_file:\n for record in json.load(json_file):\n # print(record)\n output_records.append({field:str(record[field]) if field in record else '' for field in fields})\nwrite_to_csv('/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv',output_records)\n\n\n# filename = '/Users/sdey/Downloads/privia_utilization_data.csv'\n# output_filename = '/Users/sdey/Downloads/privia_utilization_raw_data.csv'\n#\n# with open(filename, 'r') as input_file:\n# with open(output_filename, 'w') as output_file:\n# reader = csv.DictReader(input_file)\n# writer = csv.DictWriter(output_file, fieldnames=reader.fieldnames)\n# writer.writeheader()\n# number_of_lines = 0\n# for row in reader:\n# row['Medication Name'] = row['Medication Name'].replace(',',':')\n# writer.writerow(row)\n# number_of_lines+=1\n# if number_of_lines % 10000 == 0 :\n# print('%d lines'%number_of_lines)\n\n#\n# filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515.csv'\n# output_filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515_output.csv'\n#\n# with open(filename, 'r') as input_file:\n# with open(output_filename, 'w') as output_file:\n# reader = csv.DictReader(input_file)\n# fieldnames = ['ndc','nadac_per_unit','effective_date','pricing_unit','otc',\n# 'explanation_code','classification_for_rate_setting','corresponding_generic_drug_nadac_per_unit',\n# 'corresponding_generic_drug_effective_date','as_of_date']\n# writer = csv.DictWriter(output_file, fieldnames=fieldnames)\n# writer.writeheader()\n# number_of_lines = 0\n# for row in reader:\n# row['explanation_code'] = row['explanation_code'].replace('\\\"','').replace(',','').replace(' ','')\n# row.pop('ndc_description')\n# row.pop('pharmacy_type_indicator')\n# writer.writerow(row)\n# number_of_lines+=1\n# if number_of_lines % 10000 == 0 :\n# print('%d lines'%number_of_lines)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import ccxt
import json
import time
from baglanti import mysql_baglan
import datetime
import requests
from urllib.parse import urljoin
import sys
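# For every active user, pull Binance withdrawal and deposit history via ccxt and
# upsert the rows into the `transfers` table.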
db = mysql_baglan("bingo")
cursor = db.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
sql = "SELECT apikey,secret,id FROM `users` WHERE status = '1' order by id desc"
cursor.execute(sql)
results = cursor.fetchall()
column_names = ['apikey', 'secret', 'id']
for row in results:
user = dict(zip(column_names, row))
print(user['id'])
exchange = ccxt.binance({
'apiKey': user['apikey'],
'secret': user['secret'],
'enableRateLimit': True
})
#BTC
if exchange.has['fetchDeposits']:
withdrawals = exchange.fetch_withdrawals()
set_data = []
for withdraw in withdrawals:
date_time = int(withdraw['timestamp'])/1000
date_time = datetime.datetime.fromtimestamp(date_time)
set_data.append([user['id'], withdraw['currency'], withdraw['txid'], withdraw['address'], withdraw['type'], withdraw['amount'], withdraw['status'], withdraw['fee']['cost'],date_time])
sqlguncelleme = "INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)"
cursor.executemany(sqlguncelleme, set_data,)
db.commit()
withdrawals = exchange.fetch_deposits()
set_data = []
for withdraw in withdrawals:
date_time = int(withdraw['timestamp'])/1000
date_time = datetime.datetime.fromtimestamp(date_time)
set_data.append([user['id'], withdraw['currency'], withdraw['txid'], withdraw['address'], withdraw['type'], withdraw['amount'], withdraw['status'], '0',date_time])
sqlguncelleme = "INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)"
cursor.executemany(sqlguncelleme, set_data,)
db.commit()
|
normal
|
{
"blob_id": "1d29ce58ca626155d626216fbbd70d7b241efa25",
"index": 6363,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute('SET NAMES utf8;')\ncursor.execute('SET CHARACTER SET utf8;')\ncursor.execute('SET character_set_connection=utf8;')\n<mask token>\ncursor.execute(sql)\n<mask token>\nfor row in results:\n user = dict(zip(column_names, row))\n print(user['id'])\n exchange = ccxt.binance({'apiKey': user['apikey'], 'secret': user[\n 'secret'], 'enableRateLimit': True})\n if exchange.has['fetchDeposits']:\n withdrawals = exchange.fetch_withdrawals()\n set_data = []\n for withdraw in withdrawals:\n date_time = int(withdraw['timestamp']) / 1000\n date_time = datetime.datetime.fromtimestamp(date_time)\n set_data.append([user['id'], withdraw['currency'], withdraw[\n 'txid'], withdraw['address'], withdraw['type'], withdraw[\n 'amount'], withdraw['status'], withdraw['fee']['cost'],\n date_time])\n sqlguncelleme = (\n 'INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)'\n )\n cursor.executemany(sqlguncelleme, set_data)\n db.commit()\n withdrawals = exchange.fetch_deposits()\n set_data = []\n for withdraw in withdrawals:\n date_time = int(withdraw['timestamp']) / 1000\n date_time = datetime.datetime.fromtimestamp(date_time)\n set_data.append([user['id'], withdraw['currency'], withdraw[\n 'txid'], withdraw['address'], withdraw['type'], withdraw[\n 'amount'], withdraw['status'], '0', date_time])\n sqlguncelleme = (\n 'INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)'\n )\n cursor.executemany(sqlguncelleme, set_data)\n db.commit()\n",
"step-3": "<mask token>\ndb = mysql_baglan('bingo')\ncursor = db.cursor()\ncursor.execute('SET NAMES utf8;')\ncursor.execute('SET CHARACTER SET utf8;')\ncursor.execute('SET character_set_connection=utf8;')\nsql = (\n \"SELECT apikey,secret,id FROM `users` WHERE status = '1' order by id desc\")\ncursor.execute(sql)\nresults = cursor.fetchall()\ncolumn_names = ['apikey', 'secret', 'id']\nfor row in results:\n user = dict(zip(column_names, row))\n print(user['id'])\n exchange = ccxt.binance({'apiKey': user['apikey'], 'secret': user[\n 'secret'], 'enableRateLimit': True})\n if exchange.has['fetchDeposits']:\n withdrawals = exchange.fetch_withdrawals()\n set_data = []\n for withdraw in withdrawals:\n date_time = int(withdraw['timestamp']) / 1000\n date_time = datetime.datetime.fromtimestamp(date_time)\n set_data.append([user['id'], withdraw['currency'], withdraw[\n 'txid'], withdraw['address'], withdraw['type'], withdraw[\n 'amount'], withdraw['status'], withdraw['fee']['cost'],\n date_time])\n sqlguncelleme = (\n 'INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)'\n )\n cursor.executemany(sqlguncelleme, set_data)\n db.commit()\n withdrawals = exchange.fetch_deposits()\n set_data = []\n for withdraw in withdrawals:\n date_time = int(withdraw['timestamp']) / 1000\n date_time = datetime.datetime.fromtimestamp(date_time)\n set_data.append([user['id'], withdraw['currency'], withdraw[\n 'txid'], withdraw['address'], withdraw['type'], withdraw[\n 'amount'], withdraw['status'], '0', date_time])\n sqlguncelleme = (\n 'INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)'\n )\n cursor.executemany(sqlguncelleme, set_data)\n db.commit()\n",
"step-4": "import ccxt\nimport json\nimport time\nfrom baglanti import mysql_baglan\nimport datetime\nimport requests\nfrom urllib.parse import urljoin\nimport sys\ndb = mysql_baglan('bingo')\ncursor = db.cursor()\ncursor.execute('SET NAMES utf8;')\ncursor.execute('SET CHARACTER SET utf8;')\ncursor.execute('SET character_set_connection=utf8;')\nsql = (\n \"SELECT apikey,secret,id FROM `users` WHERE status = '1' order by id desc\")\ncursor.execute(sql)\nresults = cursor.fetchall()\ncolumn_names = ['apikey', 'secret', 'id']\nfor row in results:\n user = dict(zip(column_names, row))\n print(user['id'])\n exchange = ccxt.binance({'apiKey': user['apikey'], 'secret': user[\n 'secret'], 'enableRateLimit': True})\n if exchange.has['fetchDeposits']:\n withdrawals = exchange.fetch_withdrawals()\n set_data = []\n for withdraw in withdrawals:\n date_time = int(withdraw['timestamp']) / 1000\n date_time = datetime.datetime.fromtimestamp(date_time)\n set_data.append([user['id'], withdraw['currency'], withdraw[\n 'txid'], withdraw['address'], withdraw['type'], withdraw[\n 'amount'], withdraw['status'], withdraw['fee']['cost'],\n date_time])\n sqlguncelleme = (\n 'INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)'\n )\n cursor.executemany(sqlguncelleme, set_data)\n db.commit()\n withdrawals = exchange.fetch_deposits()\n set_data = []\n for withdraw in withdrawals:\n date_time = int(withdraw['timestamp']) / 1000\n date_time = datetime.datetime.fromtimestamp(date_time)\n set_data.append([user['id'], withdraw['currency'], withdraw[\n 'txid'], withdraw['address'], withdraw['type'], withdraw[\n 'amount'], withdraw['status'], '0', date_time])\n sqlguncelleme = (\n 'INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)'\n )\n cursor.executemany(sqlguncelleme, set_data)\n db.commit()\n",
"step-5": "import ccxt\r\nimport json\r\nimport time\r\nfrom baglanti import mysql_baglan\r\nimport datetime\r\nimport requests\r\nfrom urllib.parse import urljoin\r\nimport sys\r\n\r\ndb = mysql_baglan(\"bingo\")\r\ncursor = db.cursor()\r\ncursor.execute('SET NAMES utf8;')\r\ncursor.execute('SET CHARACTER SET utf8;')\r\ncursor.execute('SET character_set_connection=utf8;')\r\n\r\nsql = \"SELECT apikey,secret,id FROM `users` WHERE status = '1' order by id desc\"\r\ncursor.execute(sql)\r\nresults = cursor.fetchall()\r\ncolumn_names = ['apikey', 'secret', 'id']\r\nfor row in results:\r\n user = dict(zip(column_names, row))\r\n print(user['id'])\r\n exchange = ccxt.binance({\r\n 'apiKey': user['apikey'],\r\n 'secret': user['secret'],\r\n 'enableRateLimit': True\r\n })\r\n\r\n #BTC\r\n if exchange.has['fetchDeposits']:\r\n withdrawals = exchange.fetch_withdrawals()\r\n set_data = []\r\n for withdraw in withdrawals:\r\n date_time = int(withdraw['timestamp'])/1000\r\n date_time = datetime.datetime.fromtimestamp(date_time)\r\n \r\n set_data.append([user['id'], withdraw['currency'], withdraw['txid'], withdraw['address'], withdraw['type'], withdraw['amount'], withdraw['status'], withdraw['fee']['cost'],date_time])\r\n sqlguncelleme = \"INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)\"\r\n cursor.executemany(sqlguncelleme, set_data,)\r\n db.commit()\r\n\r\n withdrawals = exchange.fetch_deposits()\r\n set_data = []\r\n for withdraw in withdrawals:\r\n date_time = int(withdraw['timestamp'])/1000\r\n date_time = datetime.datetime.fromtimestamp(date_time)\r\n \r\n set_data.append([user['id'], withdraw['currency'], withdraw['txid'], withdraw['address'], withdraw['type'], withdraw['amount'], withdraw['status'], '0',date_time])\r\n sqlguncelleme = \"INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)\"\r\n cursor.executemany(sqlguncelleme, set_data,)\r\n db.commit()\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from app.routes import home
from .home import bp as home
from .dashboard import bp as dashboard
|
normal
|
{
"blob_id": "358a4948ac1f60e0966328cebf401777042c3d0e",
"index": 5239,
"step-1": "<mask token>\n",
"step-2": "from app.routes import home\nfrom .home import bp as home\nfrom .dashboard import bp as dashboard\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# encoding=utf-8
from lib.calculate_time import tic,toc
import scipy as sp
import numpy as np
from lib.make_A import make_A
from lib.make_distance import make_distance
from lib.lambda_sum_smallest import lambda_sum_smallest
from lib.fiedler import fiedler
from lib.make_al import make_al
import math
from lib.newmatrix import newmatrix
from lib.grComp import gr_comp
from lib.Divide2 import Divide2
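# Spectral edge-cutting routine: build the incidence and Laplacian matrices of the graph,
# repeatedly remove the highest-scoring edge until the algebraic connectivity (lambda2)
# drops to ~0, then label the resulting connected components with gr_comp.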
def mainFunctionD2( Ad, vertex_num, edge_num, nodes_G, iter_times, group):
s = []
s.append(vertex_num)
if (vertex_num == 3 or edge_num < 5 or iter_times > 4 ):
        print "something strange in mainFunctionD2"
return
iter=1
tic()
    #the transposed matrix of the adjacency matrix
    #fill in the lower triangle so the adjacency matrix is symmetric
size_of_Ad = Ad.shape
transposed_Ad = np.transpose(Ad)
for i in range(size_of_Ad[0]):
for j in range(size_of_Ad[1]):
Ad[i][j] = Ad[i][j] or transposed_Ad[i][j]
    # build A, the 34x78 incidence-style matrix with entries 1 and -1
A = make_A(Ad, vertex_num, edge_num)
transposed_A = np.transpose(A)
    # column sums give the degree vector B (34 rows, 1 column; treat it as a list)
B = sum(Ad)
    # build a 78x5 distance matrix
Distance = make_distance(Ad, A, vertex_num, edge_num, B)
    # Pos records the position of the maximum value in the fifth column of Distance
max_list = []
for each in Distance[:,4]:
max_list.append(each)
Pos = max_list.index(max(max_list)) + 1
    # expand the degree vector into a diagonal matrix and build the Laplacian
D = np.diag(B)
L = np.dot(A, transposed_A)
W = Ad
L1 = D - W
cutIndexSign = 0
    # x is the diagonal matrix of L's eigenvalues sorted in ascending order
eig_val,eig_vec = np.linalg.eig(L)
eig_val_list = []
for each in eig_val:
eig_val_list.append(each)
eig_val_list = sorted(eig_val_list)
x = np.array(eig_val_list)
x = np.diag(x)
    # Q is an orthonormal basis for L (orthogonalized matrix)
Q = sp.linalg.orth(L)
    # Fiedler vector of L: the eigenvector of the second-smallest eigenvalue
v = fiedler(L)
    # find the eigenvalue associated with that eigenvector
lambda2 = lambda_sum_smallest(L,2)
print "ECCEM"
    print "cut number "+str(iter)
    # t is the algorithm's running time; append it to the time log file
t=toc()
with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/time.txt","a") as f:
f.write(str(t)+"\n")
f.close()
    # compute the third-smallest lambda
lambda3 = lambda_sum_smallest(L,3)-lambda2
aa = (v[int(Distance[Pos - 1][0])-1] - v[int(Distance[Pos - 1][1])-1]) ** 2
b1 = 1 + (2 - aa) / (lambda2 - lambda3)
low = lambda2 - aa / b1
    # u is the product of Q's transpose and al
al = make_al(vertex_num,Distance[Pos-1][0],Distance[Pos-1][1])
transposed_Q = np.transpose(Q)
u = np.dot(transposed_Q,al)
with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/out.txt","a") as f:
f.write(str(lambda2)+"\n")
f.close()
while(lambda2>math.exp(-23)):
        cutIndexSign = 1
if( vertex_num == 1 or edge_num < 3):
break
        # refresh A, edge_num and B with the updated matrix information
result_list = newmatrix(Distance, A, edge_num, B, Pos)
A = result_list[0]
edge_num = result_list[1]
B = result_list[2]
Distance = make_distance(Ad, A, vertex_num, edge_num, B)
max_list = []
for each in Distance[:,4]:
max_list.append(each)
Pos = max_list.index(max(max_list)) + 1
iter = iter + 1
        print "cut number " + str(iter)
D = np.diag(B)
transposed_A = np.transpose(A)
L = np.dot(A, transposed_A)
v = fiedler(L)
        # if any node's degree has dropped to zero, break out of the loop
list_B = []
for each in B:
list_B.append(each)
if(0 in list_B):
            print "Distance_size[0]: found an isolated node with degree 0, breaking out of the loop"
break
lambda2 = lambda_sum_smallest(L, 2)
        # log the elapsed time once more
t=toc()
with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/time.txt","a") as f:
f.write(str(t) + "\n")
f.close()
lambda3 = lambda_sum_smallest(L,3)-lambda2
a1 = (v[int(Distance[Pos - 1][0])-1] - v[int(Distance[Pos - 1][1])-1]) ** 2
b1 = 1 + (2 - a1) / (lambda2 - lambda3)
low = lambda2 - a1 / b1
with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/out.txt","a") as f:
f.write(str(lambda2) + "\n")
f.close()
    # build compMatrix from the first two columns of Distance
Distance_size = Distance.shape
compMatrix = np.arange(Distance_size[0]*2).reshape(Distance_size[0],2)
i = 0
for each in Distance[:,0]:
compMatrix[i][0] = each
i = i + 1
j = 0
for each in Distance[:,1]:
compMatrix[j][1] = each
j = j + 1
ncV = gr_comp(compMatrix,vertex_num)
s.append(group)
s.append(iter_times)
with open("/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_group/out.txt","a") as f:
f.write(str(s)+"\n")
f.closed
nodes_G = np.transpose(nodes_G)
result_list_of_Divide2 = D
|
normal
|
{
"blob_id": "77d545d1a4fc5f96ae19f654a32ab75707434d46",
"index": 7614,
"step-1": "# encoding=utf-8\nfrom lib.calculate_time import tic,toc\nimport scipy as sp\nimport numpy as np\nfrom lib.make_A import make_A\nfrom lib.make_distance import make_distance\nfrom lib.lambda_sum_smallest import lambda_sum_smallest\nfrom lib.fiedler import fiedler\nfrom lib.make_al import make_al\nimport math\nfrom lib.newmatrix import newmatrix\nfrom lib.grComp import gr_comp\nfrom lib.Divide2 import Divide2\n\ndef mainFunctionD2( Ad, vertex_num, edge_num, nodes_G, iter_times, group):\n s = []\n s.append(vertex_num)\n if (vertex_num == 3 or edge_num < 5 or iter_times > 4 ):\n print \"something strange in mainfuntiond2\"\n return\n iter=1\n tic()\n #the transposed matrix of Adjacent matrix\n #邻接矩阵补全下三角!\n size_of_Ad = Ad.shape\n transposed_Ad = np.transpose(Ad)\n for i in range(size_of_Ad[0]):\n for j in range(size_of_Ad[1]):\n Ad[i][j] = Ad[i][j] or transposed_Ad[i][j]\n #得出A 有1和-1的34行78列的矩阵\n A = make_A(Ad, vertex_num, edge_num)\n transposed_A = np.transpose(A)\n #列求和得出度矩阵B 34行一列 看成一个列表就行\n B = sum(Ad)\n #构造一个78*5的距离矩阵\n Distance = make_distance(Ad, A, vertex_num, edge_num, B)\n #变量POS记录Distance中第五行中最大值所在的位置\n max_list = []\n for each in Distance[:,4]:\n max_list.append(each)\n Pos = max_list.index(max(max_list)) + 1\n #把度矩阵展开成全矩阵,并且构造拉普拉斯矩阵\n D = np.diag(B)\n L = np.dot(A, transposed_A)\n W = Ad\n L1 = D - W\n cutIndexSign = 0\n #构造x为L的升序特征值矩阵\n eig_val,eig_vec = np.linalg.eig(L)\n eig_val_list = []\n for each in eig_val:\n eig_val_list.append(each)\n eig_val_list = sorted(eig_val_list)\n x = np.array(eig_val_list)\n x = np.diag(x)\n #构造Q得L的正交规范化矩阵(求矩阵正交基)\n Q = sp.linalg.orth(L)\n #求L的费德勒向量:第二小特征值的特征向量\n v = fiedler(L)\n #找特征向量的特征值\n lambda2 = lambda_sum_smallest(L,2)\n print \"ECCEM\"\n print \"切割第\"+str(iter)+\"次\"\n #t为算法运行的时间,写入time中\n t=toc()\n with open(\"/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/time.txt\",\"a\") as f:\n f.write(str(t)+\"\\n\")\n f.close()\n #求第三小的lambda\n lambda3 = lambda_sum_smallest(L,3)-lambda2\n aa = (v[int(Distance[Pos - 1][0])-1] - v[int(Distance[Pos - 1][1])-1]) ** 2\n\n b1 = 1 + (2 - aa) / (lambda2 - lambda3)\n low = lambda2 - aa / b1\n #矩阵U是Q的转置和al的积\n al = make_al(vertex_num,Distance[Pos-1][0],Distance[Pos-1][1])\n transposed_Q = np.transpose(Q)\n u = np.dot(transposed_Q,al)\n with open(\"/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/out.txt\",\"a\") as f:\n f.write(str(lambda2)+\"\\n\")\n f.close()\n\n while(lambda2>math.exp(-23)):\n cutIndexSigen = 1\n if( vertex_num == 1 or edge_num < 3):\n break\n #将矩阵中的信息A,edge_num,B进行刷新\n\n result_list = newmatrix(Distance, A, edge_num, B, Pos)\n A = result_list[0]\n edge_num = result_list[1]\n B = result_list[2]\n Distance = make_distance(Ad, A, vertex_num, edge_num, B)\n max_list = []\n for each in Distance[:,4]:\n max_list.append(each)\n Pos = max_list.index(max(max_list)) + 1\n iter = iter + 1\n print \"切割第\" + str(iter) + \"次\"\n D = np.diag(B)\n transposed_A = np.transpose(A)\n L = np.dot(A, transposed_A)\n v = fiedler(L)\n\n #有结点取为零直接跳出循环\n list_B = []\n for each in B:\n list_B.append(each)\n if(0 in list_B):\n print \"Distance_size[0]有节点度为0的孤立节点跳出了循环\"\n break\n lambda2 = lambda_sum_smallest(L, 2)\n #写一次时间\n t=toc()\n\n with open(\"/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/time.txt\",\"a\") as f:\n f.write(str(t) + \"\\n\")\n f.close()\n lambda3 = lambda_sum_smallest(L,3)-lambda2\n a1 = (v[int(Distance[Pos - 1][0])-1] - v[int(Distance[Pos - 1][1])-1]) ** 2\n b1 = 1 + (2 - a1) / (lambda2 - lambda3)\n low = 
lambda2 - a1 / b1\n with open(\"/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_data/out.txt\",\"a\") as f:\n f.write(str(lambda2) + \"\\n\")\n f.close()\n #构造comMatrix 就是Distance的前两行\n Distance_size = Distance.shape\n compMatrix = np.arange(Distance_size[0]*2).reshape(Distance_size[0],2)\n i = 0\n for each in Distance[:,0]:\n compMatrix[i][0] = each\n i = i + 1\n j = 0\n for each in Distance[:,1]:\n compMatrix[j][1] = each\n j = j + 1\n ncV = gr_comp(compMatrix,vertex_num)\n s.append(group)\n s.append(iter_times)\n with open(\"/home/a/PycharmProjects/TestZhu/tjufe_1/Social_Network_Graph/output_group/out.txt\",\"a\") as f:\n f.write(str(s)+\"\\n\")\n f.closed\n nodes_G = np.transpose(nodes_G)\n\n\n\n\n\n\n\n\n result_list_of_Divide2 = D\n\n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def prime_sieve(n):
if n==2: return [2]
elif n<2: return []
s=range(3,n+1,2)
mroot = n ** 0.5
half=(n+1)/2-1
i=0
m=3
while m <= mroot:
if s[i]:
j=(m*m-3)/2
s[j]=0
while j<half:
s[j]=0
j+=m
i=i+1
m=2*i+3
return [2]+[x for x in s if x]
ps = prime_sieve(1000000)
def get_primes_upto(n):
i = 0
while ps[i] <= n:
i += 1
return ps[0:i+1];
def trial_division(n):
if n == 1: return [1]
primes = get_primes_upto(int(n**0.5) + 1)
prime_factors = []
for p in primes:
if p*p > n: break
while n % p == 0:
prime_factors.append(p)
n //= p
if n > 1: prime_factors.append(n)
return prime_factors
def unique_factors(n):
return len(set(trial_division(n)))
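# Scan consecutive integers, recording the number of distinct prime factors of each, until
# four in a row all have exactly 4 distinct factors; print the first of those four.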
fs = [0]
c = 0
for i in range(1,1000000):
c+= 1
fs.append(unique_factors(i))
if len(fs) > 4:
if fs[-4:] == [4,4,4,4]:
print c -3
break
|
normal
|
{
"blob_id": "5771f49ad5254588f1683a8d45aa81ce472bb562",
"index": 30,
"step-1": "\ndef prime_sieve(n): \n\tif n==2: return [2]\n\telif n<2: return []\n\ts=range(3,n+1,2)\n\tmroot = n ** 0.5\n\thalf=(n+1)/2-1\n\ti=0\n\tm=3\n\twhile m <= mroot:\n\t\tif s[i]:\n\t\t\tj=(m*m-3)/2\n\t\t\ts[j]=0\n\t\t\twhile j<half:\n\t\t\t\ts[j]=0\n\t\t\t\tj+=m\n\t\ti=i+1\n\t\tm=2*i+3\n\treturn [2]+[x for x in s if x]\n\nps = prime_sieve(1000000)\n\ndef get_primes_upto(n):\n i = 0\n while ps[i] <= n:\n i += 1\n return ps[0:i+1];\n\ndef trial_division(n):\n if n == 1: return [1]\n primes = get_primes_upto(int(n**0.5) + 1)\n prime_factors = []\n \n for p in primes:\n if p*p > n: break\n while n % p == 0:\n prime_factors.append(p)\n n //= p\n if n > 1: prime_factors.append(n)\n \n return prime_factors\n\ndef unique_factors(n):\n return len(set(trial_division(n)))\n\nfs = [0]\nc = 0\nfor i in range(1,1000000):\n c+= 1\n fs.append(unique_factors(i))\n if len(fs) > 4:\n if fs[-4:] == [4,4,4,4]:\n print c -3\n break\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import pandas as pd
import time
from urllib.request import Request
import requests
import json
import re
import sys
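# Compare prices for a search term: scrape Konga (NGN) and Amazon (USD converted to NGN via
# the currconv API) and print the per-site price lists and averages.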
def compare(mystring):
def usd_to_ngn():
print("Getting USD to NGN Rate")
req = requests.get("http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63")
req.raise_for_status()
res = str(req.content)[2:-1]
res = json.loads(res)
rate = float(res['results']['USD_NGN']['val'])
return rate
def amazon(mystring):
search_term = mystring.replace(" ", "+")
header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}
html = Request("https://www.amazon.com/s?k={}&ref=nb_sb_noss_1".format(search_term), headers=header)
time.sleep(10)
page_html2 = uReq(html).read()
page_soup = soup(page_html2, 'html.parser')
price_tags1 = page_soup.select('span.a-offscreen')
prices = [el.get_text() for el in price_tags1] # get text
# print(f"1 : {prices}")
prices = ["".join(re.findall("([\S]?)([0-9\.]+)", i)[0]) for i in prices]
# ^ remove spaces, and get the price range minimum, with the currency
rate = usd_to_ngn()
prices = [(float(i[1:]) * rate) for i in prices]
return prices
def konga(mystring):
#mystring = (input('enter your search term: '))
search_term = mystring.replace(" ", "+")
my_url = 'https://www.konga.com/search?search='
new = my_url+search_term
header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}
#print(new)
request = Request(new, headers=header)
time.sleep(10)
response = uReq(request).read()
page_soup = soup(response, 'html.parser')
#price_containers = page_soup.find_all('span', {'class':'d7c0f_sJAqi'})
#containers = page_soup.find_all('div', {'class':'af885_1iPzH'})
price_tags = page_soup.select("span.d7c0f_sJAqi")
prices = [float(str(el.contents[1]).replace(",", "")) for el in price_tags[:30]]
return prices
konga = konga(mystring)
# print(konga)
amazon = amazon(mystring)
    # print(amazon)
    """
    if len(konga) > len(amazon) > 0:
        konga = konga[:len(amazon)]
    elif len(konga) > 0:
        amazon = amazon[:len(konga)]
    """
def find_avg(lst):
if len(lst) < 1:
return None
avg = 0
for i in lst:
avg += i
return avg / len(lst)
obj = {"avg_konga_price": find_avg(konga), "avg_Amazon_price": find_avg(amazon),
"currency" : "NGN",
'konga' : ("Unable To Fetch Prices" if (len(konga) < 1) else konga),
'amazon' : ("Unable To Fetch Prices" if (len(amazon) < 1) else amazon)}
    # print(f"k = {konga} : a = {amazon}")
print(obj)
if len(sys.argv) > 1:
compare(" ".join(sys.argv[1:]))
# Prompt for a search term and run the comparison interactively
term = str(input('enter your search term: '))
compare(term)
|
normal
|
{
"blob_id": "d96038a715406388b4de4611391dee18fc559d5a",
"index": 2693,
"step-1": "<mask token>\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\nif len(sys.argv) > 1:\n compare(' '.join(sys.argv[1:]))\n<mask token>\ncompare(term)\n",
"step-3": "<mask token>\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\nif len(sys.argv) > 1:\n compare(' '.join(sys.argv[1:]))\nterm = str(input('enter your search term: '))\ncompare(term)\n",
"step-4": "import bs4\nfrom urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\nimport pandas as pd\nimport time\nfrom urllib.request import Request\nimport requests\nimport json\nimport re\nimport sys\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\nif len(sys.argv) > 1:\n compare(' '.join(sys.argv[1:]))\nterm = str(input('enter your search term: '))\ncompare(term)\n",
"step-5": "import bs4\r\nfrom urllib.request import urlopen as uReq\r\nfrom bs4 import BeautifulSoup as soup\r\nimport pandas as pd\r\nimport time\r\nfrom urllib.request import Request\r\nimport requests\r\nimport json\r\nimport re\r\nimport sys\r\n\r\n\r\ndef compare(mystring):\r\n def usd_to_ngn():\r\n print(\"Getting USD to NGN Rate\")\r\n req = requests.get(\"http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63\")\r\n req.raise_for_status()\r\n\r\n res = str(req.content)[2:-1]\r\n res = json.loads(res)\r\n\r\n rate = float(res['results']['USD_NGN']['val'])\r\n return rate\r\n \r\n def amazon(mystring):\r\n search_term = mystring.replace(\" \", \"+\")\r\n header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}\r\n html = Request(\"https://www.amazon.com/s?k={}&ref=nb_sb_noss_1\".format(search_term), headers=header)\r\n time.sleep(10)\r\n page_html2 = uReq(html).read()\r\n page_soup = soup(page_html2, 'html.parser')\r\n price_tags1 = page_soup.select('span.a-offscreen')\r\n prices = [el.get_text() for el in price_tags1] # get text\r\n # print(f\"1 : {prices}\")\r\n prices = [\"\".join(re.findall(\"([\\S]?)([0-9\\.]+)\", i)[0]) for i in prices]\r\n # ^ remove spaces, and get the price range minimum, with the currency\r\n rate = usd_to_ngn()\r\n prices = [(float(i[1:]) * rate) for i in prices] \r\n return prices\r\n\r\n \r\n \r\n def konga(mystring):\r\n #mystring = (input('enter your search term: '))\r\n search_term = mystring.replace(\" \", \"+\")\r\n my_url = 'https://www.konga.com/search?search='\r\n new = my_url+search_term\r\n header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}\r\n #print(new)\r\n request = Request(new, headers=header)\r\n time.sleep(10)\r\n response = uReq(request).read()\r\n page_soup = soup(response, 'html.parser')\r\n #price_containers = page_soup.find_all('span', {'class':'d7c0f_sJAqi'})\r\n #containers = page_soup.find_all('div', {'class':'af885_1iPzH'})\r\n price_tags = page_soup.select(\"span.d7c0f_sJAqi\")\r\n prices = [float(str(el.contents[1]).replace(\",\", \"\")) for el in price_tags[:30]]\r\n return prices\r\n\r\n \r\n \r\n konga = konga(mystring)\r\n # print(konga)\r\n amazon = amazon(mystring)\r\n # print(alibaba)\r\n \"\"\"\r\n if len(konga) > len(alibaba) > 0:\r\n konga = konga[:len(alibaba)]\r\n elif len(konga) > 0:\r\n alibaba = alibaba[:len(konga)]\r\n \"\"\"\r\n def find_avg(lst):\r\n if len(lst) < 1:\r\n return None\r\n avg = 0\r\n for i in lst:\r\n avg += i\r\n return avg / len(lst)\r\n\r\n obj = {\"avg_konga_price\": find_avg(konga), \"avg_Amazon_price\": find_avg(amazon),\r\n \"currency\" : \"NGN\",\r\n 'konga' : (\"Unable To Fetch Prices\" if (len(konga) < 1) else konga),\r\n 'amazon' : (\"Unable To Fetch Prices\" if (len(amazon) < 1) else amazon)}\r\n # print(f\"k = {konga} : a = {alibaba}\")\r\n print(obj)\r\n\r\n\r\nif len(sys.argv) > 1:\r\n compare(\" \".join(sys.argv[1:]))\r\n\r\n# Uncomment the code below to run a test with query='diamond jewelry'\r\nterm = str(input('enter your search term: '))\r\ncompare(term)\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import configure
import connectify
import userlog
import dirlog
import time
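# Simple sync client (Python 2): identify the user, push the local directory listing
# to the server, then poll it repeatedly for pending updates.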
def getUser(sock):
try:
userinfo = userlog.getInfo()
except:
userinfo = configure.init(sock)
userinfo = userinfo.split('^')[0]
# print userinfo
return userinfo
if __name__=="__main__":
sock = connectify.createCon()
userinfo = getUser(sock)
while 1:
dirs, flag = dirlog.getDirs()
if flag:
sock.send('2'+userinfo+'^'+dirs)
print sock.recv(1024)
sock.send('3'+userinfo)
update_count = sock.recv(1024)
update = []
for x in range(0,int(update_count)):
sock.send('4'+userinfo)
update.append(sock.recv(1024))
print update
time.sleep(2)
connectify.closeCon(sock)
|
normal
|
{
"blob_id": "2ca1b603b18316bc1d970b5e32389e10e4b532e2",
"index": 1071,
"step-1": "import configure\nimport connectify\nimport userlog\nimport dirlog\nimport time\n\n\ndef getUser(sock):\n\ttry:\n\t\tuserinfo = userlog.getInfo()\n\texcept:\t\n\t\tuserinfo = configure.init(sock)\n\tuserinfo = userinfo.split('^')[0]\n#\tprint userinfo\n\treturn userinfo\n\nif __name__==\"__main__\":\t\n\tsock = connectify.createCon()\n\tuserinfo = getUser(sock)\n\twhile 1:\n\t\tdirs, flag = dirlog.getDirs()\n\t\tif flag:\n\t\t\tsock.send('2'+userinfo+'^'+dirs)\n\t\t\tprint sock.recv(1024)\n\t\tsock.send('3'+userinfo)\n\t\tupdate_count = sock.recv(1024)\n\t\tupdate = []\n\t\tfor x in range(0,int(update_count)):\n\t\t\tsock.send('4'+userinfo)\n\t\t\tupdate.append(sock.recv(1024))\n\t\tprint update\n\t\ttime.sleep(2)\n\tconnectify.closeCon(sock)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding=utf-8
# Copyright 2019 SK T-Brain Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kobert import download
def get_onnx_kobert_model(cachedir=".cache"):
"""Get KoBERT ONNX file path after downloading"""
onnx_kobert = {
"url": "s3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx",
"chksum": "6f6610f2e3b61da6de8dbce",
}
model_info = onnx_kobert
model_path, is_cached = download(
model_info["url"], model_info["chksum"], cachedir=cachedir
)
return model_path
def make_dummy_input(max_seq_len):
def do_pad(x, max_seq_len, pad_id):
return [_x + [pad_id] * (max_seq_len - len(_x)) for _x in x]
input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)
token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)
input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)
position_ids = list(range(max_seq_len))
return (input_ids, token_type_ids, input_mask, position_ids)
if __name__ == "__main__":
import onnxruntime
import numpy as np
from kobert import get_onnx_kobert_model
onnx_path = get_onnx_kobert_model()
dummy_input = make_dummy_input(max_seq_len=512)
so = onnxruntime.SessionOptions()
sess = onnxruntime.InferenceSession(onnx_path)
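    # Run the graph on the padded dummy batch; the feed names below must match the
    # input names used when the model was exported to ONNX.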
outputs = sess.run(
None,
{
"input_ids": np.array(dummy_input[0]),
"token_type_ids": np.array(dummy_input[1]),
"input_mask": np.array(dummy_input[2]),
"position_ids": np.array(dummy_input[3]),
},
)
print(outputs[-2][0])
|
normal
|
{
"blob_id": "b6e4214ace89165f6cfde9f2b97fcee8be81f2ed",
"index": 4301,
"step-1": "<mask token>\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n\n def do_pad(x, max_seq_len, pad_id):\n return [(_x + [pad_id] * (max_seq_len - len(_x))) for _x in x]\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return input_ids, token_type_ids, input_mask, position_ids\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n\n def do_pad(x, max_seq_len, pad_id):\n return [(_x + [pad_id] * (max_seq_len - len(_x))) for _x in x]\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return input_ids, token_type_ids, input_mask, position_ids\n\n\nif __name__ == '__main__':\n import onnxruntime\n import numpy as np\n from kobert import get_onnx_kobert_model\n onnx_path = get_onnx_kobert_model()\n dummy_input = make_dummy_input(max_seq_len=512)\n so = onnxruntime.SessionOptions()\n sess = onnxruntime.InferenceSession(onnx_path)\n outputs = sess.run(None, {'input_ids': np.array(dummy_input[0]),\n 'token_type_ids': np.array(dummy_input[1]), 'input_mask': np.array(\n dummy_input[2]), 'position_ids': np.array(dummy_input[3])})\n print(outputs[-2][0])\n",
"step-4": "from kobert import download\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n\n def do_pad(x, max_seq_len, pad_id):\n return [(_x + [pad_id] * (max_seq_len - len(_x))) for _x in x]\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return input_ids, token_type_ids, input_mask, position_ids\n\n\nif __name__ == '__main__':\n import onnxruntime\n import numpy as np\n from kobert import get_onnx_kobert_model\n onnx_path = get_onnx_kobert_model()\n dummy_input = make_dummy_input(max_seq_len=512)\n so = onnxruntime.SessionOptions()\n sess = onnxruntime.InferenceSession(onnx_path)\n outputs = sess.run(None, {'input_ids': np.array(dummy_input[0]),\n 'token_type_ids': np.array(dummy_input[1]), 'input_mask': np.array(\n dummy_input[2]), 'position_ids': np.array(dummy_input[3])})\n print(outputs[-2][0])\n",
"step-5": "# coding=utf-8\n# Copyright 2019 SK T-Brain Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom kobert import download\n\n\ndef get_onnx_kobert_model(cachedir=\".cache\"):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {\n \"url\": \"s3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx\",\n \"chksum\": \"6f6610f2e3b61da6de8dbce\",\n }\n\n model_info = onnx_kobert\n model_path, is_cached = download(\n model_info[\"url\"], model_info[\"chksum\"], cachedir=cachedir\n )\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n def do_pad(x, max_seq_len, pad_id):\n return [_x + [pad_id] * (max_seq_len - len(_x)) for _x in x]\n\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return (input_ids, token_type_ids, input_mask, position_ids)\n\n\nif __name__ == \"__main__\":\n import onnxruntime\n import numpy as np\n from kobert import get_onnx_kobert_model\n\n onnx_path = get_onnx_kobert_model()\n dummy_input = make_dummy_input(max_seq_len=512)\n so = onnxruntime.SessionOptions()\n sess = onnxruntime.InferenceSession(onnx_path)\n outputs = sess.run(\n None,\n {\n \"input_ids\": np.array(dummy_input[0]),\n \"token_type_ids\": np.array(dummy_input[1]),\n \"input_mask\": np.array(dummy_input[2]),\n \"position_ids\": np.array(dummy_input[3]),\n },\n )\n print(outputs[-2][0])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# The error measures used in this project
#
# Root Mean Squared Error
# Mean Absolute Error
#
# ! Both calculated after descaling the output of the system first
import numpy as np
def RMSE(min_y, max_y, yhat, y):
    # first scale output and target back to
    # original scale, to prevent scale bias
    yhat = descale(yhat, min_y, max_y)
    y = descale(y, min_y, max_y)
    return np.sqrt(np.mean(np.power(np.subtract(yhat, y), 2)))
def MAE(min_y, max_y, yhat, y):
# first scale output and target back to
# original scale, to prevent scale bias
yhat = descale(yhat, min_y, max_y)
y = descale(y, min_y, max_y)
return(np.mean(np.absolute(np.subtract(yhat,y))))
def descale(scaled_y, min_y, max_y):
'''
Descaled data back to original scale
Inputs:
y = vector of values
min_y = minimum value of original data
		max_y = maximum value of original data
Output: y in original scale
'''
diff = np.subtract(max_y ,min_y)
descaled_y = np.add(np.multiply(scaled_y, diff), min_y)
# descaled y = scaled_y *(ymax-ymin)+ymin
# descaled_y = [(y*(diff)+min_y) for y in scaled_y]
return(descaled_y)
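
# Quick sanity check: with min_y=0 and max_y=10, a scaled prediction of 0.5 against a
# target of 0.4 descales to 5 vs 4, so both error measures come out to 1.0.
if __name__ == "__main__":
    print(RMSE(0, 10, np.array([0.5]), np.array([0.4])))  # 1.0
    print(MAE(0, 10, np.array([0.5]), np.array([0.4])))   # 1.0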
|
normal
|
{
"blob_id": "4fd4c9cf3bdb73a003ce860bf2ee0ccab01f0009",
"index": 4646,
"step-1": "<mask token>\n\n\ndef RMSE(min_y, max_y, yhat, y):\n yhat = descale(yhat, min_y, max_y)\n y = descale(y, min_y, max_y)\n return np.mean(np.power(np.subtract(yhat, y), 2))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef RMSE(min_y, max_y, yhat, y):\n yhat = descale(yhat, min_y, max_y)\n y = descale(y, min_y, max_y)\n return np.mean(np.power(np.subtract(yhat, y), 2))\n\n\ndef MAE(min_y, max_y, yhat, y):\n yhat = descale(yhat, min_y, max_y)\n y = descale(y, min_y, max_y)\n return np.mean(np.absolute(np.subtract(yhat, y)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef RMSE(min_y, max_y, yhat, y):\n yhat = descale(yhat, min_y, max_y)\n y = descale(y, min_y, max_y)\n return np.mean(np.power(np.subtract(yhat, y), 2))\n\n\ndef MAE(min_y, max_y, yhat, y):\n yhat = descale(yhat, min_y, max_y)\n y = descale(y, min_y, max_y)\n return np.mean(np.absolute(np.subtract(yhat, y)))\n\n\ndef descale(scaled_y, min_y, max_y):\n \"\"\"\n\tDescaled data back to original scale\n\n\tInputs: \n\t\ty = vector of values\n\t\tmin_y = minimum value of original data\n\t\tmax_y = minimum value of original data\n\n\tOutput: y in original scale\n\t\"\"\"\n diff = np.subtract(max_y, min_y)\n descaled_y = np.add(np.multiply(scaled_y, diff), min_y)\n return descaled_y\n",
"step-4": "import numpy as np\n\n\ndef RMSE(min_y, max_y, yhat, y):\n yhat = descale(yhat, min_y, max_y)\n y = descale(y, min_y, max_y)\n return np.mean(np.power(np.subtract(yhat, y), 2))\n\n\ndef MAE(min_y, max_y, yhat, y):\n yhat = descale(yhat, min_y, max_y)\n y = descale(y, min_y, max_y)\n return np.mean(np.absolute(np.subtract(yhat, y)))\n\n\ndef descale(scaled_y, min_y, max_y):\n \"\"\"\n\tDescaled data back to original scale\n\n\tInputs: \n\t\ty = vector of values\n\t\tmin_y = minimum value of original data\n\t\tmax_y = minimum value of original data\n\n\tOutput: y in original scale\n\t\"\"\"\n diff = np.subtract(max_y, min_y)\n descaled_y = np.add(np.multiply(scaled_y, diff), min_y)\n return descaled_y\n",
"step-5": "# The error measures used in this project\n#\n# Rooth Mean Squared Error\n# Mean Absolute Error\n#\n# ! Both calculated after descaling the output of the system first\n\nimport numpy as np\n\ndef RMSE(min_y, max_y, yhat, y):\n\t# first scale output and target back to \n\t# original scale, to prevent scale bias\n\tyhat = descale(yhat, min_y, max_y)\n\ty = descale(y, min_y, max_y)\n\treturn(np.mean(np.power(np.subtract(yhat,y),2)))\n\ndef MAE(min_y, max_y, yhat, y):\n\t# first scale output and target back to \n\t# original scale, to prevent scale bias\n\tyhat = descale(yhat, min_y, max_y)\n\ty = descale(y, min_y, max_y)\n\treturn(np.mean(np.absolute(np.subtract(yhat,y))))\n\ndef descale(scaled_y, min_y, max_y):\n\t'''\n\tDescaled data back to original scale\n\n\tInputs: \n\t\ty = vector of values\n\t\tmin_y = minimum value of original data\n\t\tmax_y = minimum value of original data\n\n\tOutput: y in original scale\n\t'''\n\tdiff = np.subtract(max_y ,min_y)\n\tdescaled_y = np.add(np.multiply(scaled_y, diff), min_y)\n\t# descaled y = scaled_y *(ymax-ymin)+ymin\n\t# descaled_y = [(y*(diff)+min_y) for y in scaled_y]\n\treturn(descaled_y)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
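# Rotate a list of numbers to the right by a user-chosen number of positions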
elements = str(input("Type the elements of list: ")).split()
elements = list(map(float,elements))
times = int(input("How many times you wish shift to right: "))
for _ in range(times):
removed = elements.pop()
elements.insert(0,removed)
print(elements)
|
normal
|
{
"blob_id": "307bb7461a729ba979f6a862fe7c292c42f96ce6",
"index": 1164,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(times):\n removed = elements.pop()\n elements.insert(0, removed)\nprint(elements)\n",
"step-3": "elements = str(input('Type the elements of list: ')).split()\nelements = list(map(float, elements))\ntimes = int(input('How many times you wish shift to right: '))\nfor _ in range(times):\n removed = elements.pop()\n elements.insert(0, removed)\nprint(elements)\n",
"step-4": "# -*- coding: utf-8 -*-\n\nelements = str(input(\"Type the elements of list: \")).split()\nelements = list(map(float,elements))\n\ntimes = int(input(\"How many times you wish shift to right: \"))\n\nfor _ in range(times):\n\tremoved = elements.pop()\n\telements.insert(0,removed)\n\nprint(elements)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# encoding:utf-8
import tensorflow as tf
import p182
# Create the list of input files and build the input file queue from it. Before running the
# input pipeline, all raw data should be converted to a single format and stored in TFRecord
# files; the file list below should contain every TFRecord file that provides training data.
files = tf.train.match_filenames_once("/home/shenxj/tf-work/datasets/file_pattern-*")
filename_queue = tf.train.string_input_producer(files, shuffle=False)
# Parse the data in the TFRecord files, using a method similar to the one introduced in section 7.1.
# Here image holds the raw image data, label is the example's label, and height, width and
# channels give the image's dimensions.
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
'channels': tf.FixedLenFeature([], tf.int64),
}
)
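# The feature names and types above must match those used when the TFRecord files were written.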
image, label = features['image'], features['label']
height, width = features['height'], features['width']
channels = features['channels']
# Decode the pixel matrix from the raw image data and restore the image according to its dimensions
decoded_image = tf.decode_raw(image, tf.uint8)
decoded_image.set_shape([height, width, channels])
# Define the size of the images fed to the neural network's input layer.
image_size = 299
# preprocess_for_train is the image preprocessing routine introduced in section 7.2.2
distorted_image = p182.preprocess_for_train(
decoded_image, image_size, image_size, None
)
# Use tf.train.shuffle_batch to assemble the preprocessed images and labels into the
# batches needed for training the network
min_after_dequeque = 10000
batch_size = 100
capacity = min_after_dequeque + 3 * batch_size
image_batch, label_batch = tf.train.shuffle_batch(
[distorted_image, label], batch_size=batch_size,
capacity=capacity, min_after_dequeue=min_after_dequeque
)
# Define the network structure and the optimization step. image_batch can be fed to the network's
# input layer, and label_batch provides the correct answers for the examples in the batch.
# (inference, calc_loss, learning_rate and TRAINING_ROUNDS are assumed to be defined elsewhere.)
logit = inference(image_batch)
loss = calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
# Create a session and run the network's optimization process
with tf.Session() as sess:
    # Training preparation: initialize variables and start the queue-runner threads
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Training loop
for i in range(TRAINING_ROUNDS):
sess.run(train_step)
    # Stop all threads
coord.request_stop()
coord.join(threads)
|
normal
|
{
"blob_id": "1685a2c49bea14e6fcaffb03634f6875f8fa1049",
"index": 3726,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndecoded_image.set_shape([height, width, channels])\n<mask token>\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n coord.request_stop()\n coord.join(threads)\n",
"step-3": "<mask token>\nfiles = tf.train.match_filenames_once(\n '/home/shenxj/tf-work/datasets/file_pattern-*')\nfilename_queue = tf.train.string_input_producer(files, shuffle=False)\nreader = tf.TFRecordReader()\n_, serialized_example = reader.read(filename_queue)\nfeatures = tf.parse_single_example(serialized_example, features={'image':\n tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.\n int64), 'height': tf.FixedLenFeature([], tf.int64), 'weigth': tf.\n FixedLenFeature([], tf.int64), 'channels': tf.FixedLenFeature([], tf.\n int64)})\nimage, label = features['image'], features['label']\nheight, width = features['height'], features['wigth']\nchannels = features['channels']\ndecoded_image = tf.decode_raw(image, tf.uint8)\ndecoded_image.set_shape([height, width, channels])\nimage_size = 299\ndistorted_image = p182.preprocess_for_train(decoded_image, image_size,\n image_size, None)\nmin_after_dequeque = 10000\nbatch_size = 100\ncapacity = min_after_dequeque + 3 * batch_size\nimage_batch, label_batch = tf.train.shuffle_batch([distorted_image, label],\n batch_size=batch_size, capacity=capacity, min_after_dequeue=\n min_after_dequeque)\nlogit = inference(image_batch)\nloss = calc_loss(logit, label_batch)\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate\n ).minimize(loss)\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n coord.request_stop()\n coord.join(threads)\n",
"step-4": "import tensorflow as tf\nimport p182.py as p182\nfiles = tf.train.match_filenames_once(\n '/home/shenxj/tf-work/datasets/file_pattern-*')\nfilename_queue = tf.train.string_input_producer(files, shuffle=False)\nreader = tf.TFRecordReader()\n_, serialized_example = reader.read(filename_queue)\nfeatures = tf.parse_single_example(serialized_example, features={'image':\n tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.\n int64), 'height': tf.FixedLenFeature([], tf.int64), 'weigth': tf.\n FixedLenFeature([], tf.int64), 'channels': tf.FixedLenFeature([], tf.\n int64)})\nimage, label = features['image'], features['label']\nheight, width = features['height'], features['wigth']\nchannels = features['channels']\ndecoded_image = tf.decode_raw(image, tf.uint8)\ndecoded_image.set_shape([height, width, channels])\nimage_size = 299\ndistorted_image = p182.preprocess_for_train(decoded_image, image_size,\n image_size, None)\nmin_after_dequeque = 10000\nbatch_size = 100\ncapacity = min_after_dequeque + 3 * batch_size\nimage_batch, label_batch = tf.train.shuffle_batch([distorted_image, label],\n batch_size=batch_size, capacity=capacity, min_after_dequeue=\n min_after_dequeque)\nlogit = inference(image_batch)\nloss = calc_loss(logit, label_batch)\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate\n ).minimize(loss)\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n coord.request_stop()\n coord.join(threads)\n",
"step-5": "# encoding:utf-8\nimport tensorflow as tf\nimport p182.py as p182\n# 创建文件列表,并通过文件列表创建输入文件队列。在调用输入数据处理流程前,需要\n# 统一所有原始数据的格式并将它们存储到TFRcord文件中。下面给出的文件列表应该包含所\n# 有提供训练数据的TFRcord文件\nfiles = tf.train.match_filenames_once(\"/home/shenxj/tf-work/datasets/file_pattern-*\")\nfilename_queue = tf.train.string_input_producer(files, shuffle=False)\n\n# 使用类似7.1节中结婚嫂的方法解析TFRecord文件里的数据。这里假设image中存储的是图像\n# 的原始数据,label为该样例所对应的标签。height,width和channels给出了图像的维度。\nreader = tf.TFRecordReader()\n_, serialized_example = reader.read(filename_queue)\nfeatures = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'weigth': tf.FixedLenFeature([], tf.int64),\n 'channels': tf.FixedLenFeature([], tf.int64),\n }\n)\nimage, label = features['image'], features['label']\nheight, width = features['height'], features['wigth']\nchannels = features['channels']\n\n# 从原始图像数据解析出像素矩阵,并根据图像尺寸还原图像\ndecoded_image = tf.decode_raw(image, tf.uint8)\ndecoded_image.set_shape([height, width, channels])\n# 定义神经网络输入层图片的大小。\nimage_size = 299\n# preprocess_for_train为7.2.2小节中介绍的图像预处理程序\ndistorted_image = p182.preprocess_for_train(\n decoded_image, image_size, image_size, None\n)\n\n# 将处理后的图像和标签数据通过tf.train.shuffle_batch整理成神经网络训练时\n# 需要的batch\nmin_after_dequeque = 10000\nbatch_size = 100\ncapacity = min_after_dequeque + 3 * batch_size\nimage_batch, label_batch = tf.train.shuffle_batch(\n [distorted_image, label], batch_size=batch_size,\n capacity=capacity, min_after_dequeue=min_after_dequeque\n)\n\n# 定义神经网络的结构以及优化过程。image_batch可以作为输入提供给神经网络的输入层。\n# label_batch则提供了输入batch中样例的正确答案\nlogit = inference(image_batch)\nloss = calc_loss(logit, label_batch)\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)\n\n# 声明会话并运行神经网络的优化过程\nwith tf.Session() as sess:\n # 神经网络训练准备工作。这些工作包括变量初始化、线程启动\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # 神经网络训练过程\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n\n # 停止所有线程\n coord.request_stop()\n coord.join(threads)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding:utf-8 -*-
# Random forest hyperparameter tuning
# RandomizedSearchCV: randomized search for the best parameters
# GridSearchCV: exhaustive (grid) search for the best parameters
import pandas as pd
features = pd.read_csv('data/temps_extended.csv')
features = pd.get_dummies(features)
labels = features['actual']
features = features.drop('actual', axis = 1)
feature_list = list(features.columns)
import numpy as np
features = np.array(features)
labels = np.array(labels)
from sklearn.model_selection import train_test_split
train_features, test_features, train_labels, test_labels = train_test_split(features, labels,
test_size = 0.25, random_state = 42)
print('Training Features Shape:', train_features.shape)
print('Training Labels Shape:', train_labels.shape)
print('Testing Features Shape:', test_features.shape)
print('Testing Labels Shape:', test_labels.shape)
################# Keep the 6 most important features and rebuild the training set ##############################
important_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year']
important_indices = [feature_list.index(feature) for feature in important_feature_names]
important_train_features = train_features[:, important_indices]
important_test_features = test_features[:, important_indices]
print('Important train features shape:', important_train_features.shape)
print('Important test features shape:', important_test_features.shape)
train_features = important_train_features[:]
test_features = important_test_features[:]
feature_list = important_feature_names[:]
################# Keep the 6 most important features and rebuild the training set ##############################

########Build the random forest model###################
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(random_state = 42)
from pprint import pprint
# Print all of the model's current parameters
pprint(rf.get_params())
# {'bootstrap': True,              # whether to draw bootstrap samples
#  'criterion': 'mse',             # loss used to judge splits (mse error for regression)
#  'max_depth': None,              # maximum tree depth - important
#  'max_features': 'auto',
#  'max_leaf_nodes': None,         # maximum number of leaf nodes - important
#  'min_impurity_decrease': 0.0,
#  'min_impurity_split': None,
#  'min_samples_leaf': 1,          # minimum samples per leaf - important
#  'min_samples_split': 2,         # minimum samples required to split a node - important
#  'min_weight_fraction_leaf': 0.0,
#  'n_estimators': 'warn',
#  'n_jobs': None,                 # how many CPU cores to run on
#  'oob_score': False,
#  'random_state': 42,
#  'verbose': 0,
#  'warm_start': False}
from sklearn.model_selection import RandomizedSearchCV  # randomized search for the best parameters
# Number of trees to build
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# How max_features is chosen
max_features = ['auto', 'sqrt']
# Maximum tree depth: 10, 20 or None
max_depth = [int(x) for x in np.linspace(10, 20, num = 2)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples per leaf; no split may leave a child node with fewer samples than this
min_samples_leaf = [1, 2, 4]
# Whether to bootstrap-sample the training examples
bootstrap = [True, False]
# Random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
rf = RandomForestRegressor()  # create the model
# Randomized parameter search - cv: cross-validation folds, n_iter: 100 random combinations,
# scoring: evaluation metric, verbose: how much to log, n_jobs: -1 uses all CPU cores
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,
n_iter = 100, scoring='neg_mean_absolute_error',
cv = 3, verbose=2, random_state=42, n_jobs=-1)
# Run the search operation
# rf_random.fit(train_features, train_labels)
# print(rf_random.best_params_)
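# Best parameters (presumably from an earlier run of the search above), hard-coded here
# so the slow randomized search does not have to be repeated on every run.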
best_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True}
def evaluate(model, test_features, test_labels):  # evaluation helper
    predictions = model.predict(test_features)
    errors = abs(predictions - test_labels)
    mape = 100 * np.mean(errors / test_labels)
    accuracy = 100 - mape
    print('Mean temperature error.', np.mean(errors))
    print('Accuracy = {:0.2f}%.'.format(accuracy))
#################With the default parameters##########################
# Mean temperature error: 3.91697080292
# Accuracy = 93.36%.
base_model = RandomForestRegressor(random_state = 42)  # use the default parameters
base_model.fit(train_features, train_labels)
print('Default parameters')
evaluate(base_model, test_features, test_labels)
#################With the default parameters##########################


#################With the best parameters from the randomized search##########################
# Mean temperature error: 3.7141472957
# Accuracy = 93.73%.
best_random = RandomForestRegressor(n_estimators=1800,min_samples_split=10,random_state = 42,min_samples_leaf=4,max_features='auto',max_depth=None,bootstrap=True)
best_random.fit(train_features, train_labels)
print('Local best')
evaluate(best_random, test_features, test_labels)
#################With the best parameters from the randomized search##########################

################Fine-tune around the best randomized-search parameters######################
# Mean temperature error: 3.69222090145
# Accuracy = 93.77%.
from sklearn.model_selection import GridSearchCV  # exhaustive grid search
param_grid = {'n_estimators': [1000, 1200, 1400, 1600],
'min_samples_split': [3, 5, 7],
'min_samples_leaf': [2,3, 4, 5,6],
'max_features': ['auto'],
'max_depth': [None],
'bootstrap': [True]}
rf = RandomForestRegressor()
# Grid search over the parameter grid
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
scoring = 'neg_mean_absolute_error', cv = 3,
n_jobs = -1, verbose = 2)
grid_search.fit(train_features, train_labels)
best_grid = grid_search.best_estimator_
evaluate(best_grid, test_features, test_labels)
################Fine-tune around the best randomized-search parameters######################


########Build the random forest model###################
|
normal
|
{
"blob_id": "de4e14a4fa8520c1aae60805084224337dd9620c",
"index": 9009,
"step-1": "<mask token>\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\n<mask token>\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\n<mask token>\npprint(rf.get_params())\n<mask token>\nmax_depth.append(None)\n<mask token>\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n<mask token>\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\n<mask token>\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n<mask token>\ngrid_search.fit(train_features, train_labels)\n<mask token>\nevaluate(best_grid, test_features, test_labels)\n",
"step-3": "<mask token>\nfeatures = pd.read_csv('data/temps_extended.csv')\nfeatures = pd.get_dummies(features)\nlabels = features['actual']\nfeatures = features.drop('actual', axis=1)\nfeature_list = list(features.columns)\n<mask token>\nfeatures = np.array(features)\nlabels = np.array(labels)\n<mask token>\ntrain_features, test_features, train_labels, test_labels = train_test_split(\n features, labels, test_size=0.25, random_state=42)\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend',\n 'year']\nimportant_indices = [feature_list.index(feature) for feature in\n important_feature_names]\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\nfeature_list = important_feature_names[:]\n<mask token>\nrf = RandomForestRegressor(random_state=42)\n<mask token>\npprint(rf.get_params())\n<mask token>\nn_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\nmax_features = ['auto', 'sqrt']\nmax_depth = [int(x) for x in np.linspace(10, 20, num=2)]\nmax_depth.append(None)\nmin_samples_split = [2, 5, 10]\nmin_samples_leaf = [1, 2, 4]\nbootstrap = [True, False]\nrandom_grid = {'n_estimators': n_estimators, 'max_features': max_features,\n 'max_depth': max_depth, 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}\nrf = RandomForestRegressor()\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=\n random_grid, n_iter=100, scoring='neg_mean_absolute_error', cv=3,\n verbose=2, random_state=42, n_jobs=-1)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10,\n 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None,\n 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\nbase_model = RandomForestRegressor(random_state=42)\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\nbest_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10,\n random_state=42, min_samples_leaf=4, max_features='auto', max_depth=\n None, bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n<mask token>\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split':\n [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto'\n ], 'max_depth': [None], 'bootstrap': [True]}\nrf = RandomForestRegressor()\ngrid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring=\n 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2)\ngrid_search.fit(train_features, train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n",
"step-4": "import pandas as pd\nfeatures = pd.read_csv('data/temps_extended.csv')\nfeatures = pd.get_dummies(features)\nlabels = features['actual']\nfeatures = features.drop('actual', axis=1)\nfeature_list = list(features.columns)\nimport numpy as np\nfeatures = np.array(features)\nlabels = np.array(labels)\nfrom sklearn.model_selection import train_test_split\ntrain_features, test_features, train_labels, test_labels = train_test_split(\n features, labels, test_size=0.25, random_state=42)\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend',\n 'year']\nimportant_indices = [feature_list.index(feature) for feature in\n important_feature_names]\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\nfeature_list = important_feature_names[:]\nfrom sklearn.ensemble import RandomForestRegressor\nrf = RandomForestRegressor(random_state=42)\nfrom pprint import pprint\npprint(rf.get_params())\nfrom sklearn.model_selection import RandomizedSearchCV\nn_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\nmax_features = ['auto', 'sqrt']\nmax_depth = [int(x) for x in np.linspace(10, 20, num=2)]\nmax_depth.append(None)\nmin_samples_split = [2, 5, 10]\nmin_samples_leaf = [1, 2, 4]\nbootstrap = [True, False]\nrandom_grid = {'n_estimators': n_estimators, 'max_features': max_features,\n 'max_depth': max_depth, 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}\nrf = RandomForestRegressor()\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=\n random_grid, n_iter=100, scoring='neg_mean_absolute_error', cv=3,\n verbose=2, random_state=42, n_jobs=-1)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10,\n 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None,\n 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\nbase_model = RandomForestRegressor(random_state=42)\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\nbest_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10,\n random_state=42, min_samples_leaf=4, max_features='auto', max_depth=\n None, bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\nfrom sklearn.model_selection import GridSearchCV\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split':\n [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto'\n ], 'max_depth': [None], 'bootstrap': [True]}\nrf = RandomForestRegressor()\ngrid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring=\n 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2)\ngrid_search.fit(train_features, 
train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n",
"step-5": "# -*- coding:utf-8 -*-\n\n#随机森林调参\n#RandomizedSearchCV 随机最佳\n#GridSearchCV 地毯式最佳\n\n\nimport pandas as pd\nfeatures = pd.read_csv('data/temps_extended.csv')\n\n\nfeatures = pd.get_dummies(features)\n\nlabels = features['actual']\nfeatures = features.drop('actual', axis = 1)\n\nfeature_list = list(features.columns)\n\nimport numpy as np\n\nfeatures = np.array(features)\nlabels = np.array(labels)\n\nfrom sklearn.model_selection import train_test_split\n\ntrain_features, test_features, train_labels, test_labels = train_test_split(features, labels,\n test_size = 0.25, random_state = 42)\n\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\n\n#################选择6个比较重要的参数当做训练集,重新创建训练集##############################\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year']\n\nimportant_indices = [feature_list.index(feature) for feature in important_feature_names]\n\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\n\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\n\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\n\nfeature_list = important_feature_names[:]\n\n#################选择6个比较重要的参数当做训练集,重新创建训练集##############################\n\n########创建随机森林模型###################\nfrom sklearn.ensemble import RandomForestRegressor\n\nrf = RandomForestRegressor(random_state = 42)\n\nfrom pprint import pprint\n\n# 打印所有参数\npprint(rf.get_params())\n\n# {'bootstrap': True,#是否随机采样\n# 'criterion': 'mse',#指定目标方程 损失的计算方法 熵值 回归 mse计算误差\n# 'max_depth': None,# 树的最大深度 重要\n# 'max_features': 'auto',\n# 'max_leaf_nodes': None, 最大叶子节点 重要\n# 'min_impurity_decrease': 0.0,\n# 'min_impurity_split': None,\n# 'min_samples_leaf': 1, 信息增益 重要\n# 'min_samples_split': 2, 最小分裂次数 重要\n# 'min_weight_fraction_leaf': 0.0,\n# 'n_estimators': 'warn',\n# 'n_jobs': None, #多少核CPU 去跑\n# 'oob_score': False,\n# 'random_state': 42,\n# 'verbose': 0,\n# 'warm_start': False}\n\nfrom sklearn.model_selection import RandomizedSearchCV# 随机最好\n# 建立树的个数\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# 最大特征的选择方式\nmax_features = ['auto', 'sqrt']\n# 树的最大深度 10 20 none\nmax_depth = [int(x) for x in np.linspace(10, 20, num = 2)]\nmax_depth.append(None)\n# 节点最小分裂所需样本个数\nmin_samples_split = [2, 5, 10]\n# 叶子节点最小样本数,任何分裂不能让其子节点样本数少于此值\nmin_samples_leaf = [1, 2, 4]\n# 样本采样方法\nbootstrap = [True, False]\n\n# Random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n\nrf = RandomForestRegressor()# 创建模型\n#随机寻找参数 cv:交叉验证 , n_iter 随机100次,scoring:评估方法,verbose:打印信息,n_jobs:所以cpu去跑\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,\n n_iter = 100, scoring='neg_mean_absolute_error',\n cv = 3, verbose=2, random_state=42, n_jobs=-1)\n\n\n\n\n# 执行寻找操作\n# rf_random.fit(train_features, train_labels)\n# print(rf_random.best_params_)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels): #评估\n 
predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n\n print('平均气温误差.',np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n\n#################使用默认参数##########################\n# 平均气温误差. 3.91697080292\n# Accuracy = 93.36%.\nbase_model = RandomForestRegressor( random_state = 42) #使用默认的参数\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\n#################使用默认参数##########################\n\n\n#################使用最好参数##########################\n# 平均气温误差. 3.7141472957\n# Accuracy = 93.73%.\nbest_random = RandomForestRegressor(n_estimators=1800,min_samples_split=10,random_state = 42,min_samples_leaf=4,max_features='auto',max_depth=None,bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n#################使用最好参数##########################\n\n################在随机最好的参数进行微调######################\n# 平均气温误差. 3.69222090145\n# Accuracy = 93.77%.\nfrom sklearn.model_selection import GridSearchCV# 地毯式搜索\n\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600],\n 'min_samples_split': [3, 5, 7],\n 'min_samples_leaf': [2,3, 4, 5,6],\n 'max_features': ['auto'],\n 'max_depth': [None],\n 'bootstrap': [True]}\n\n\n\nrf = RandomForestRegressor()\n\n# 网络搜索\ngrid_search = GridSearchCV(estimator = rf, param_grid = param_grid,\n scoring = 'neg_mean_absolute_error', cv = 3,\n n_jobs = -1, verbose = 2)\ngrid_search.fit(train_features, train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n################在随机最好的参数进行微调######################\n\n\n########创建随机森林模型###################",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from bbapp.scripts.getScores import doScoresScrape, fixScores
logger = get_task_logger(__name__)
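# Periodic task: celery beat triggers this scrape every 10 minutes (crontab minute='*/10')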
@periodic_task(
run_every=(crontab(minute='*/10')),
name="scrape_espn_feed",
ignore_result=True
)
def scrape_espn_feed():
"""
    Scrape the latest scores from the ESPN feed and update the stored MLB scores
"""
thescores = doScoresScrape()
fixScores(thescores, 'MLB')
logger.info("Scores scraped")
|
normal
|
{
"blob_id": "a9a067ee3b176d2f2ca558b69ce2bc598bb31d22",
"index": 4501,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',\n ignore_result=True)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info('Scores scraped')\n",
"step-3": "<mask token>\nlogger = get_task_logger(__name__)\n\n\n@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',\n ignore_result=True)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info('Scores scraped')\n",
"step-4": "from celery.task.schedules import crontab\nfrom celery.decorators import periodic_task\nfrom celery.utils.log import get_task_logger\nfrom bbapp.scripts.getScores import doScoresScrape, fixScores\nlogger = get_task_logger(__name__)\n\n\n@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',\n ignore_result=True)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info('Scores scraped')\n",
"step-5": "from celery.task.schedules import crontab\nfrom celery.decorators import periodic_task\nfrom celery.utils.log import get_task_logger\n\n\nfrom bbapp.scripts.getScores import doScoresScrape, fixScores\n\nlogger = get_task_logger(__name__)\n\n\n@periodic_task(\n run_every=(crontab(minute='*/10')),\n name=\"scrape_espn_feed\",\n ignore_result=True\n)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info(\"Scores scraped\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
# library to create window in the terminal
import curses
# initialized curses by returning a window object
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(True)
curses.curs_set(0)
height, width = stdscr.getmaxyx()
# create a new window of a given size
window = curses.newwin(height, width, 0, 0)
window.keypad(1)
window.timeout(100)
# snake's starting position (integer division keeps coordinates on whole cells)
snk_x = width // 4
snk_y = height // 2
# initialize snake's size to 3
snake = [
[snk_y, snk_x],
[snk_y, snk_x-1],
[snk_y, snk_x-2]
]
# initial food position
food = [height // 2, width // 2]
# add first food in the window
window.addch(int(food[0]), int(food[1]), curses.ACS_PI)
# snake initializes direction to right
key = curses.KEY_RIGHT
# main of snake game
while True:
next_key = window.getch()
key = key if next_key == -1 else next_key
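    # game over when the snake hits a wall or runs into itself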
if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0] in snake[1:]:
curses.endwin()
quit()
new_head = [snake[0][0], snake[0][1]]
if key == curses.KEY_DOWN:
new_head[0] += 1
if key == curses.KEY_UP:
new_head[0] -= 1
if key == curses.KEY_LEFT:
new_head[1] -= 1
if key == curses.KEY_RIGHT:
new_head[1] += 1
snake.insert(0, new_head)
if snake[0] == food:
food = None
while food is None:
nf = [ random.randint(1, height-1), random.randint(1, width-1)]
food = nf if nf not in snake else None
window.addch(food[0], food[1], curses.ACS_PI)
else:
tail = snake.pop()
window.addch(int(tail[0]), int(tail[1]), ' ')
window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)
|
normal
|
{
"blob_id": "153d37b58a10847aae1fa7dbec4c7576c3d97fb2",
"index": 3407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncurses.noecho()\ncurses.cbreak()\nstdscr.keypad(True)\ncurses.curs_set(0)\n<mask token>\nwindow.keypad(1)\nwindow.timeout(100)\n<mask token>\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\n<mask token>\nwhile True:\n next_key = window.getch()\n key = key if next_key == -1 else next_key\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0\n ] in snake[1:]:\n curses.endwin()\n quit()\n new_head = [snake[0][0], snake[0][1]]\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n snake.insert(0, new_head)\n if snake[0] == food:\n food = None\n while food is None:\n nf = [random.randint(1, height - 1), random.randint(1, width - 1)]\n food = nf if nf not in snake else None\n window.addch(food[0], food[1], curses.ACS_PI)\n else:\n tail = snake.pop()\n window.addch(int(tail[0]), int(tail[1]), ' ')\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\n",
"step-3": "<mask token>\nstdscr = curses.initscr()\ncurses.noecho()\ncurses.cbreak()\nstdscr.keypad(True)\ncurses.curs_set(0)\nheight, width = stdscr.getmaxyx()\nwindow = curses.newwin(height, width, 0, 0)\nwindow.keypad(1)\nwindow.timeout(100)\nsnk_x = width / 4\nsnk_y = height / 2\nsnake = [[snk_y, snk_x], [snk_y, snk_x - 1], [snk_y, snk_x - 2]]\nfood = [height / 2, width / 2]\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\nkey = curses.KEY_RIGHT\nwhile True:\n next_key = window.getch()\n key = key if next_key == -1 else next_key\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0\n ] in snake[1:]:\n curses.endwin()\n quit()\n new_head = [snake[0][0], snake[0][1]]\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n snake.insert(0, new_head)\n if snake[0] == food:\n food = None\n while food is None:\n nf = [random.randint(1, height - 1), random.randint(1, width - 1)]\n food = nf if nf not in snake else None\n window.addch(food[0], food[1], curses.ACS_PI)\n else:\n tail = snake.pop()\n window.addch(int(tail[0]), int(tail[1]), ' ')\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\n",
"step-4": "import random\nimport curses\nstdscr = curses.initscr()\ncurses.noecho()\ncurses.cbreak()\nstdscr.keypad(True)\ncurses.curs_set(0)\nheight, width = stdscr.getmaxyx()\nwindow = curses.newwin(height, width, 0, 0)\nwindow.keypad(1)\nwindow.timeout(100)\nsnk_x = width / 4\nsnk_y = height / 2\nsnake = [[snk_y, snk_x], [snk_y, snk_x - 1], [snk_y, snk_x - 2]]\nfood = [height / 2, width / 2]\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\nkey = curses.KEY_RIGHT\nwhile True:\n next_key = window.getch()\n key = key if next_key == -1 else next_key\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0\n ] in snake[1:]:\n curses.endwin()\n quit()\n new_head = [snake[0][0], snake[0][1]]\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n snake.insert(0, new_head)\n if snake[0] == food:\n food = None\n while food is None:\n nf = [random.randint(1, height - 1), random.randint(1, width - 1)]\n food = nf if nf not in snake else None\n window.addch(food[0], food[1], curses.ACS_PI)\n else:\n tail = snake.pop()\n window.addch(int(tail[0]), int(tail[1]), ' ')\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\n",
"step-5": "import random\r\n\r\n# library to create window in the terminal\r\nimport curses \r\n\r\n# initialized curses by returning a window object\r\nstdscr = curses.initscr()\r\ncurses.noecho()\r\ncurses.cbreak()\r\nstdscr.keypad(True)\r\ncurses.curs_set(0)\r\nheight, width = stdscr.getmaxyx()\r\n\r\n# create a new window of a given size\r\nwindow = curses.newwin(height, width, 0, 0)\r\nwindow.keypad(1)\r\nwindow.timeout(100)\r\n\r\n# snake's form\r\nsnk_x = width/4\r\nsnk_y = height/2\r\n\r\n# initialize snake's size to 3\r\nsnake = [\r\n [snk_y, snk_x],\r\n [snk_y, snk_x-1],\r\n [snk_y, snk_x-2]\r\n]\r\n\r\n# food's size\r\nfood = [height/2, width/2]\r\n\r\n# add first food in the window\r\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\r\n\r\n# snake initializes direction to right\r\nkey = curses.KEY_RIGHT\r\n\r\n# main of snake game \r\nwhile True:\r\n next_key = window.getch()\r\n key = key if next_key == -1 else next_key\r\n\r\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0] in snake[1:]:\r\n curses.endwin()\r\n quit()\r\n\r\n new_head = [snake[0][0], snake[0][1]]\r\n\r\n if key == curses.KEY_DOWN:\r\n new_head[0] += 1\r\n if key == curses.KEY_UP:\r\n new_head[0] -= 1\r\n if key == curses.KEY_LEFT:\r\n new_head[1] -= 1\r\n if key == curses.KEY_RIGHT:\r\n new_head[1] += 1\r\n\r\n snake.insert(0, new_head)\r\n\r\n if snake[0] == food:\r\n food = None\r\n while food is None:\r\n nf = [ random.randint(1, height-1), random.randint(1, width-1)]\r\n food = nf if nf not in snake else None\r\n window.addch(food[0], food[1], curses.ACS_PI)\r\n else:\r\n tail = snake.pop()\r\n window.addch(int(tail[0]), int(tail[1]), ' ')\r\n \r\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\r\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import Callable, List, Optional
import numpy as np
import lab1.src.grad.grad_step_strategy as st
import lab1.src.grad.stop_criteria as sc
DEFAULT_EPSILON = 1e-9
DEFAULT_MAX_ITERATIONS = 1e5
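# generic gradient descent: the step-size strategy and the stop criteria are pluggable strategy objects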
def gradient_descent(f: Callable[[np.ndarray], float],
f_grad: Callable[[np.ndarray], np.ndarray],
start: np.ndarray,
step_strategy: st.StepStrategy,
stop_criteria: sc.StopCriteria,
eps_strategy: float = DEFAULT_EPSILON,
eps_stop_criteria: float = DEFAULT_EPSILON,
max_iterations_strategy=DEFAULT_MAX_ITERATIONS,
max_iterations_criteria=DEFAULT_MAX_ITERATIONS,
trajectory: Optional[List] = None):
strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)
criteria = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)
cur_x = start
iters = 0
if trajectory is not None:
trajectory.append(cur_x)
while True:
iters += 1
cur_grad = f_grad(cur_x)
step = strategy.next_step(cur_x)
next_x = cur_x - step * cur_grad
if criteria.should_stop(cur_x, next_x):
return cur_x, iters
cur_x = next_x
if trajectory is not None:
trajectory.append(cur_x)
if iters == max_iterations_criteria:
return cur_x, iters
if __name__ == '__main__':
def foo(p):
return p[0] ** 2 + p[1] ** 2
def foo_grad(p):
x, y = p[0], p[1]
return np.array([2 * x, 2 * y])
res, _ = gradient_descent(foo,
foo_grad,
start=np.array([3, 4]),
step_strategy=st.StepStrategy.DIVIDE_STEP,
stop_criteria=sc.StopCriteria.BY_GRAD)
print(res)
|
normal
|
{
"blob_id": "919e1f8a4b021d75496f3bcff369261a09362a65",
"index": 3645,
"step-1": "<mask token>\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-3": "<mask token>\nDEFAULT_EPSILON = 1e-09\nDEFAULT_MAX_ITERATIONS = 100000.0\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-4": "from typing import Callable, List, Optional\nimport numpy as np\nimport lab1.src.grad.grad_step_strategy as st\nimport lab1.src.grad.stop_criteria as sc\nDEFAULT_EPSILON = 1e-09\nDEFAULT_MAX_ITERATIONS = 100000.0\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-5": "from typing import Callable, List, Optional\nimport numpy as np\n\nimport lab1.src.grad.grad_step_strategy as st\nimport lab1.src.grad.stop_criteria as sc\n\n\nDEFAULT_EPSILON = 1e-9\nDEFAULT_MAX_ITERATIONS = 1e5\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float],\n f_grad: Callable[[np.ndarray], np.ndarray],\n start: np.ndarray,\n step_strategy: st.StepStrategy,\n stop_criteria: sc.StopCriteria,\n eps_strategy: float = DEFAULT_EPSILON,\n eps_stop_criteria: float = DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS,\n max_iterations_criteria=DEFAULT_MAX_ITERATIONS,\n trajectory: Optional[List] = None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n\n if trajectory is not None:\n trajectory.append(cur_x)\n\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n\n\n res, _ = gradient_descent(foo,\n foo_grad,\n start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP,\n stop_criteria=sc.StopCriteria.BY_GRAD)\n print(res)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 PanXu, Inc. All Rights Reserved
#
"""
Tests for the label index decoder
Authors: PanXu
Date: 2020/07/05 15:10:00
"""
import pytest
import torch
from easytext.tests import ASSERT
from easytext.data import LabelVocabulary
from easytext.modules import ConditionalRandomField
from easytext.label_decoder import CRFLabelIndexDecoder
class CRFData:
"""
    CRF data used by the tests
"""
def __init__(self):
bio_labels = [["O", "I-X", "B-X", "I-Y", "B-Y"]]
self.label_vocabulary = LabelVocabulary(labels=bio_labels,
padding=LabelVocabulary.PADDING)
self.logits = torch.tensor([
[[0, 0, .5, .5, .2], [0, 0, .3, .3, .1], [0, 0, .9, 10, 1]],
[[0, 0, .2, .5, .2], [0, 0, 3, .3, .1], [0, 0, .9, 1, 1]],
], dtype=torch.float)
self.tags = torch.tensor([
[2, 3, 4],
[3, 2, 2]
], dtype=torch.long)
self.transitions = torch.tensor([
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.8, 0.3, 0.1, 0.7, 0.9],
[-0.3, 2.1, -5.6, 3.4, 4.0],
[0.2, 0.4, 0.6, -0.3, -0.4],
[1.0, 1.0, 1.0, 1.0, 1.0]
], dtype=torch.float)
self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6], dtype=torch.float)
self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4], dtype=torch.float)
# Use the CRF Module with fixed transitions to compute the log_likelihood
self.crf = ConditionalRandomField(5)
self.crf.transitions = torch.nn.Parameter(self.transitions)
self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
# constraint crf
constraints = {(0, 0), (0, 1),
(1, 1), (1, 2),
(2, 2), (2, 3),
(3, 3), (3, 4),
(4, 4), (4, 0)}
# Add the transitions to the end tag
# and from the start tag.
for i in range(5):
constraints.add((5, i))
constraints.add((i, 6))
constraint_crf = ConditionalRandomField(num_tags=5, constraints=constraints)
constraint_crf.transitions = torch.nn.Parameter(self.transitions)
constraint_crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
constraint_crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
self.constraint_crf = constraint_crf
@pytest.fixture(scope="class")
def crf_data():
"""
    Produce the CRF data used by the tests
:return:
"""
return CRFData()
def test_crf_label_index_decoder(crf_data):
"""
    Test the CRF label index decoder
    :param crf_data: CRF test data
:return:
"""
mask = torch.tensor([
[1, 1, 1],
[1, 1, 0]
], dtype=torch.long)
crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,
label_vocabulary=crf_data.label_vocabulary)
label_indices = crf_label_index_decoder(logits=crf_data.logits,
mask=mask)
padding_index = crf_data.label_vocabulary.padding_index
expect = [[2, 4, 3], [4, 2, padding_index]]
ASSERT.assertListEqual(expect, label_indices.tolist())
def test_crf_label_index_decoder_with_constraint(crf_data):
mask = torch.tensor([
[1, 1, 1],
[1, 1, 0]
], dtype=torch.uint8)
crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.constraint_crf,
label_vocabulary=crf_data.label_vocabulary)
label_indices = crf_label_index_decoder(logits=crf_data.logits,
mask=mask)
padding_index = crf_data.label_vocabulary.padding_index
expect = [[2, 3, 3], [2, 3, padding_index]]
ASSERT.assertListEqual(expect, label_indices.tolist())
|
normal
|
{
"blob_id": "f64138ee5a64f09deb72b47b86bd7795acddad4d",
"index": 9980,
"step-1": "<mask token>\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\[email protected](scope='class')\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.long)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\[email protected](scope='class')\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.long)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\ndef test_crf_label_index_decoder_with_constraint(crf_data):\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.uint8)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.\n constraint_crf, label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 3, 3], [2, 3, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n",
"step-4": "<mask token>\nimport pytest\nimport torch\nfrom easytext.tests import ASSERT\nfrom easytext.data import LabelVocabulary\nfrom easytext.modules import ConditionalRandomField\nfrom easytext.label_decoder import CRFLabelIndexDecoder\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\[email protected](scope='class')\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.long)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\ndef test_crf_label_index_decoder_with_constraint(crf_data):\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.uint8)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.\n constraint_crf, label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 3, 3], [2, 3, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n",
"step-5": "#!/usr/bin/env python 3\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) 2020 PanXu, Inc. All Rights Reserved\n#\n\"\"\"\n测试 label index decoder\n\nAuthors: PanXu\nDate: 2020/07/05 15:10:00\n\"\"\"\nimport pytest\nimport torch\n\nfrom easytext.tests import ASSERT\n\nfrom easytext.data import LabelVocabulary\nfrom easytext.modules import ConditionalRandomField\nfrom easytext.label_decoder import CRFLabelIndexDecoder\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [[\"O\", \"I-X\", \"B-X\", \"I-Y\", \"B-Y\"]]\n\n self.label_vocabulary = LabelVocabulary(labels=bio_labels,\n padding=LabelVocabulary.PADDING)\n\n self.logits = torch.tensor([\n [[0, 0, .5, .5, .2], [0, 0, .3, .3, .1], [0, 0, .9, 10, 1]],\n [[0, 0, .2, .5, .2], [0, 0, 3, .3, .1], [0, 0, .9, 1, 1]],\n ], dtype=torch.float)\n\n self.tags = torch.tensor([\n [2, 3, 4],\n [3, 2, 2]\n ], dtype=torch.long)\n\n self.transitions = torch.tensor([\n [0.1, 0.2, 0.3, 0.4, 0.5],\n [0.8, 0.3, 0.1, 0.7, 0.9],\n [-0.3, 2.1, -5.6, 3.4, 4.0],\n [0.2, 0.4, 0.6, -0.3, -0.4],\n [1.0, 1.0, 1.0, 1.0, 1.0]\n ], dtype=torch.float)\n\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4], dtype=torch.float)\n\n # Use the CRF Module with fixed transitions to compute the log_likelihood\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n\n # constraint crf\n constraints = {(0, 0), (0, 1),\n (1, 1), (1, 2),\n (2, 2), (2, 3),\n (3, 3), (3, 4),\n (4, 4), (4, 0)}\n\n # Add the transitions to the end tag\n # and from the start tag.\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\[email protected](scope=\"class\")\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([\n [1, 1, 1],\n [1, 1, 0]\n ], dtype=torch.long)\n\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n\n label_indices = crf_label_index_decoder(logits=crf_data.logits,\n mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\ndef test_crf_label_index_decoder_with_constraint(crf_data):\n mask = torch.tensor([\n [1, 1, 1],\n [1, 1, 0]\n ], dtype=torch.uint8)\n\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.constraint_crf,\n label_vocabulary=crf_data.label_vocabulary)\n\n label_indices = crf_label_index_decoder(logits=crf_data.logits,\n mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 3, 3], [2, 3, padding_index]]\n\n ASSERT.assertListEqual(expect, label_indices.tolist())\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
def parse(filename):
t1, t2 = open(filename).read().strip().split("\n\n")
return tuple(map(lambda x: list(map(int, x.split("\n")[1:])), [t1, t2]))
def score(deck):
res = 0
for i in range(len(deck)):
res += deck[i] * (len(deck)-i)
return res
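# part 1: regular Combat — the higher card wins the round and both cards go to the bottom of the winner's deck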
def solution1(deck1, deck2):
while len(deck1) > 0 and len(deck2) > 0:
p1, p2 = deck1[0], deck2[0]
if p1 > p2:
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return score(deck1)
return score(deck2)
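# a sub-game is recursed into only if both players still hold at least as many cards as the value they just drew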
def can_recurse(deck1, deck2):
p1, p2 = deck1[0], deck2[0]
return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1
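# part 2: Recursive Combat — a repeated deck state ends the game in player 1's favour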
def combat(deck1, deck2):
db = set()
while len(deck1) > 0 and len(deck2) > 0:
key = (tuple(deck1), tuple(deck2))
if key in db:
return "p1", score(deck1)
db.add(key)
p1, p2 = deck1[0], deck2[0]
if can_recurse(deck1, deck2):
winner, _ = combat(deck1[1:p1+1], deck2[1:p2+1])
else:
winner = "p1" if p1 > p2 else "p2"
if winner == "p1":
deck1 = deck1[1:] + [p1, p2]
deck2 = deck2[1:]
else:
deck1 = deck1[1:]
deck2 = deck2[1:] + [p2, p1]
if len(deck1) > 0:
return "p1", score(deck1)
return "p2", score(deck2)
def solution2(deck1, deck2):
return combat(deck1, deck2)[1]
def main():
print(solution1(*parse("sample.txt")))
print(solution1(*parse("input.txt")))
print(solution2(*parse("sample.txt")))
print(solution2(*parse("input.txt")))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "508d016161131481ace41f3d3bda005423125fe5",
"index": 5635,
"step-1": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\n<mask token>\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-2": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-3": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\n<mask token>\n\n\ndef can_recurse(deck1, deck2):\n p1, p2 = deck1[0], deck2[0]\n return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\n<mask token>\n",
"step-4": "def parse(filename):\n t1, t2 = open(filename).read().strip().split('\\n\\n')\n return tuple(map(lambda x: list(map(int, x.split('\\n')[1:])), [t1, t2]))\n\n\ndef score(deck):\n res = 0\n for i in range(len(deck)):\n res += deck[i] * (len(deck) - i)\n return res\n\n\ndef solution1(deck1, deck2):\n while len(deck1) > 0 and len(deck2) > 0:\n p1, p2 = deck1[0], deck2[0]\n if p1 > p2:\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return score(deck1)\n return score(deck2)\n\n\ndef can_recurse(deck1, deck2):\n p1, p2 = deck1[0], deck2[0]\n return p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\n\ndef combat(deck1, deck2):\n db = set()\n while len(deck1) > 0 and len(deck2) > 0:\n key = tuple(deck1), tuple(deck2)\n if key in db:\n return 'p1', score(deck1)\n db.add(key)\n p1, p2 = deck1[0], deck2[0]\n if can_recurse(deck1, deck2):\n winner, _ = combat(deck1[1:p1 + 1], deck2[1:p2 + 1])\n else:\n winner = 'p1' if p1 > p2 else 'p2'\n if winner == 'p1':\n deck1 = deck1[1:] + [p1, p2]\n deck2 = deck2[1:]\n else:\n deck1 = deck1[1:]\n deck2 = deck2[1:] + [p2, p1]\n if len(deck1) > 0:\n return 'p1', score(deck1)\n return 'p2', score(deck2)\n\n\ndef solution2(deck1, deck2):\n return combat(deck1, deck2)[1]\n\n\ndef main():\n print(solution1(*parse('sample.txt')))\n print(solution1(*parse('input.txt')))\n print(solution2(*parse('sample.txt')))\n print(solution2(*parse('input.txt')))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "def parse(filename):\n\tt1, t2 = open(filename).read().strip().split(\"\\n\\n\")\n\treturn tuple(map(lambda x: list(map(int, x.split(\"\\n\")[1:])), [t1, t2]))\n\ndef score(deck):\n\tres = 0\n\tfor i in range(len(deck)):\n\t\tres += deck[i] * (len(deck)-i)\n\treturn res\n\ndef solution1(deck1, deck2):\n\twhile len(deck1) > 0 and len(deck2) > 0:\n\t\tp1, p2 = deck1[0], deck2[0]\n\t\tif p1 > p2:\n\t\t\tdeck1 = deck1[1:] + [p1, p2]\n\t\t\tdeck2 = deck2[1:]\n\t\telse:\n\t\t\tdeck1 = deck1[1:]\n\t\t\tdeck2 = deck2[1:] + [p2, p1]\n\tif len(deck1) > 0:\n\t\treturn score(deck1)\n\treturn score(deck2)\n\ndef can_recurse(deck1, deck2):\n\tp1, p2 = deck1[0], deck2[0]\n\treturn p1 <= len(deck1) - 1 and p2 <= len(deck2) - 1\n\ndef combat(deck1, deck2):\n\tdb = set()\n\twhile len(deck1) > 0 and len(deck2) > 0:\n\t\tkey = (tuple(deck1), tuple(deck2))\n\t\tif key in db:\n\t\t\treturn \"p1\", score(deck1)\n\t\tdb.add(key)\n\n\t\tp1, p2 = deck1[0], deck2[0]\n\n\t\tif can_recurse(deck1, deck2):\n\t\t\twinner, _ = combat(deck1[1:p1+1], deck2[1:p2+1])\n\t\telse:\n\t\t\twinner = \"p1\" if p1 > p2 else \"p2\"\n\n\t\tif winner == \"p1\":\n\t\t\tdeck1 = deck1[1:] + [p1, p2]\n\t\t\tdeck2 = deck2[1:]\n\t\telse:\n\t\t\tdeck1 = deck1[1:]\n\t\t\tdeck2 = deck2[1:] + [p2, p1]\n\n\tif len(deck1) > 0:\n\t\treturn \"p1\", score(deck1)\n\treturn \"p2\", score(deck2)\n\ndef solution2(deck1, deck2):\n\treturn combat(deck1, deck2)[1]\n\ndef main():\n\tprint(solution1(*parse(\"sample.txt\")))\n\tprint(solution1(*parse(\"input.txt\")))\n\n\tprint(solution2(*parse(\"sample.txt\")))\n\tprint(solution2(*parse(\"input.txt\")))\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
__version__ = "0.2.2"
__author__ = 'Anton Vanke <[email protected]>'
class Gobang:
"""
五子棋
=====
一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :
new(): 新局
printcb(): 打印棋盘
player(): 获取当前应落子 ID (轮走方)
sortstep(): 处理总步表
loadstep(): 将 step 步表的内容载入棋盘
recall(): 前进后退的操作
move(): 落子
iswin(): 判断是否获胜
"""
# 棋盘的边长
SIDE = 15
def new(self):
"""新局"""
self.__init__()
def printcb(self):
"""打印棋盘"""
print("\033[7;32;40m+ ", end="")
for c in range(65, 80):
print(chr(c), end=" ")
print("\033[0m\n")
for row in range(len(self.chessboard)):
print("\033[7;32;40m" + chr(row + 97), end="\033[0m ")
for i in self.chessboard[row]:
if i == 0:
print(i, end=" ")
elif i == 1:
print("\033[31m{}\033[0m".format(i), end=" ")
elif i == 2:
print("\033[34m{}\033[0m".format(i), end=" ")
print("\n")
def player(self):
"""获取玩家ID"""
return (len(self.step) % 2) + 1
def sortstep(self):
"""将总步表分配给黑白子"""
self.white, self.black = {}, {}
for s in self.step.items():
if s[0] % 2 == 1:
self.black.update({s[0]: s[1]})
else:
self.white.update({s[0]: s[1]})
def loadstep(self):
""" 载入步表
将 self.step 载入到棋盘上
"""
try:
self.chessboard = [[0 for i in range(self.SIDE)]
for j in range(self.SIDE)]
step_list = list(self.step.values()).copy()
for i in range(len(step_list)):
self.chessboard[ord(step_list[i][0]) -
97][ord(step_list[i][1]) - 97] = (i % 2) + 1
self.sortstep()
return True
except TypeError:
return False
def recall(self, s=-1):
""" 悔棋
"""
if s == -1:
try:
if len(self.max_step) < len(self.step):
self.max_step = self.step.copy()
if len(self.step) == 0:
raise KeyError
except KeyError:
return False
else:
self.step.popitem()
return self.loadstep()
        # redo a move
elif s == 1:
if len(self.max_step) > len(self.step):
self.step.update(
{len(self.step) + 1: self.max_step[len(self.step) + 1]})
return self.loadstep()
else:
return False
def move(self, row: int = 7, column: int = 7, **kwgs):
"""移動棋盘
row: 棋盘的行号
column: 棋盘的列号
"""
if 's' in kwgs:
row = ord(kwgs['s'][0].lower()) - 97
column = ord(kwgs['s'][1].lower()) - 97
        # check that the position is on the board
if 0 <= row < self.SIDE and 0 <= column < self.SIDE:
            # check that this position is still empty
if self.chessboard[row][column] == 0:
self.chessboard[row][column] = self.player()
self.step[len(self.step) +
1] = chr(row + 97) + chr(column + 97)
self.sortstep()
return True
return False
def iswin(self):
"""判断是否结束
"""
step_set_ls = []
cb = self.chessboard
        # convert the move table into a list of coordinates
for s in self.step.values():
step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))
# print(step_set_ls)
for r, c in step_set_ls:
try:
                # check for 5 in a row horizontally
if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][
c + 1] == cb[r][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
                # check for 5 in a row vertically
if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[
r + 1][c] == cb[r + 2][c] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
                # check for 5 in a row along the \ diagonal
if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[
r + 1][c + 1] == cb[r + 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
                # check for 5 in a row along the / diagonal
if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[
r - 1][c + 1] == cb[r - 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
return False, 0
def __init__(self):
        # the board
self.chessboard = [[0 for i in range(self.SIDE)]
for j in range(self.SIDE)]
        # full move list
self.step = {}
        # longest move list reached in this game (kept for redo)
self.max_step = {}
        # black move list
self.black = {}
        # white move list
self.white = {}
def _test():
a = Gobang()
    # set up a move list
a.step = {
1: 'no',
2: 'oo',
3: 'mn',
4: 'nn',
5: 'lm',
6: 'mm',
7: 'kl',
8: 'll',
}
    # load it onto the board
a.loadstep()
    # place a stone
a.move(9, 10)
    # print the board
a.printcb()
    # print the win check result
print(a.iswin())
a.new()
a.printcb()
if __name__ == "__main__":
_test()
|
normal
|
{
"blob_id": "e0394bfed51cd0af9bca06867e9b556b226f37d1",
"index": 1720,
"step-1": "<mask token>\n\n\nclass Gobang:\n <mask token>\n <mask token>\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n <mask token>\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n <mask token>\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Gobang:\n <mask token>\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n a.step = {(1): 'no', (2): 'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):\n 'mm', 
(7): 'kl', (8): 'll'}\n a.loadstep()\n a.move(9, 10)\n a.printcb()\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == '__main__':\n _test()\n",
"step-4": "__version__ = '0.2.2'\n__author__ = 'Anton Vanke <[email protected]>'\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n a.step = {(1): 'no', 
(2): 'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):\n 'mm', (7): 'kl', (8): 'll'}\n a.loadstep()\n a.move(9, 10)\n a.printcb()\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == '__main__':\n _test()\n",
"step-5": "#!/usr/bin/python3.8\n# -*- coding: utf-8 -*-\n__version__ = \"0.2.2\"\n__author__ = 'Anton Vanke <[email protected]>'\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n # 棋盘的边长\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print(\"\\033[7;32;40m+ \", end=\"\")\n for c in range(65, 80):\n print(chr(c), end=\" \")\n print(\"\\033[0m\\n\")\n for row in range(len(self.chessboard)):\n print(\"\\033[7;32;40m\" + chr(row + 97), end=\"\\033[0m \")\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=\" \")\n elif i == 1:\n print(\"\\033[31m{}\\033[0m\".format(i), end=\" \")\n elif i == 2:\n print(\"\\033[34m{}\\033[0m\".format(i), end=\" \")\n print(\"\\n\")\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return (len(self.step) % 2) + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[0 for i in range(self.SIDE)]\n for j in range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) -\n 97][ord(step_list[i][1]) - 97] = (i % 2) + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n # 重下\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update(\n {len(self.step) + 1: self.max_step[len(self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int = 7, column: int = 7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n # 判斷是否在棋盤上\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n # 判斷該位置上是否有子落過\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) +\n 1] = chr(row + 97) + chr(column + 97)\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n # 将步表转换为列表\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n # print(step_set_ls)\n for r, c in step_set_ls:\n try:\n # 判断 -- 行有 5 子\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][\n c + 1] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 | 有 5 子\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[\n r + 1][c] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 \\ 有 5 子\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[\n r + 1][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 / 列有 5 子\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[\n r - 1][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def 
__init__(self):\n # 棋盤\n self.chessboard = [[0 for i in range(self.SIDE)]\n for j in range(self.SIDE)]\n # 總步表\n self.step = {}\n # 单局最长步表\n self.max_step = {}\n # 黑子步表\n self.black = {}\n # 白子步表\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n # 输入步表\n a.step = {\n 1: 'no',\n 2: 'oo',\n 3: 'mn',\n 4: 'nn',\n 5: 'lm',\n 6: 'mm',\n 7: 'kl',\n 8: 'll',\n }\n # 加载\n a.loadstep()\n # 落子\n a.move(9, 10)\n # 打印棋盘\n a.printcb()\n # 输出输赢\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == \"__main__\":\n _test()\n",
"step-ids": [
8,
11,
14,
15,
16
]
}
|
[
8,
11,
14,
15,
16
] |
import os, subprocess
os.environ['FLASK_APP'] = "app/app.py"
os.environ['FLASK_DEBUG'] = "1"
# for LSTM instead: https://storage.googleapis.com/jacobdanovitch/twtc/lstm.tar.gz
# Will have to change app.py to accept only attention_weights
subprocess.call('./serve_model.sh')
subprocess.call(['flask', 'run'])
|
normal
|
{
"blob_id": "cbad5d6f381e788a2f064aac0a5d468f40b39c93",
"index": 3696,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsubprocess.call('./serve_model.sh')\nsubprocess.call(['flask', 'run'])\n",
"step-3": "<mask token>\nos.environ['FLASK_APP'] = 'app/app.py'\nos.environ['FLASK_DEBUG'] = '1'\nsubprocess.call('./serve_model.sh')\nsubprocess.call(['flask', 'run'])\n",
"step-4": "import os, subprocess\nos.environ['FLASK_APP'] = 'app/app.py'\nos.environ['FLASK_DEBUG'] = '1'\nsubprocess.call('./serve_model.sh')\nsubprocess.call(['flask', 'run'])\n",
"step-5": "import os, subprocess\n\nos.environ['FLASK_APP'] = \"app/app.py\"\nos.environ['FLASK_DEBUG'] = \"1\"\n\n# for LSTM instead: https://storage.googleapis.com/jacobdanovitch/twtc/lstm.tar.gz\n# Will have to change app.py to accept only attention_weights\n\n\nsubprocess.call('./serve_model.sh')\nsubprocess.call(['flask', 'run'])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
# from .models import Product, Client
from .models import Board
admin.site.register(Board)
# admin.site.register(Product)
# # admin.site.register(Price)
# admin.site.register(Client)
# # Register your models here.
|
normal
|
{
"blob_id": "ea323a8398ceff8496e7f8d0f365d50f3115e954",
"index": 5228,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Board)\n",
"step-3": "from django.contrib import admin\nfrom .models import Board\nadmin.site.register(Board)\n",
"step-4": "from django.contrib import admin\n# from .models import Product, Client\nfrom .models import Board\n\nadmin.site.register(Board)\n\n# admin.site.register(Product)\n# # admin.site.register(Price)\n# admin.site.register(Client)\n# # Register your models here.\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from django.urls import path
from petsApp import views
urlpatterns = [
path('user/<int:id>/', views.getUser),
path('user/addImage/', views.addImage),
path('user/getImage/<int:id>/', views.getImage),
path('user/signup/', views.signUp),
path('user/login/', views.logIn),
path('user/logout/', views.logOut),
path('user/addInvoice/', views.addInvoice),
path('pets/', views.pets), # toto
path('pets/search/', views.searchPet), # toto
path('pets/addFond/<int:id>', views.addFond),
path('pets/fond/<pet>/', views.getFond),
path('pets/delete/', views.delPet),
path('invoice/', views.invoice),
]
|
normal
|
{
"blob_id": "2458b8169029b3af501b650d548925770b0da74e",
"index": 6656,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('user/<int:id>/', views.getUser), path('user/addImage/',\n views.addImage), path('user/getImage/<int:id>/', views.getImage), path(\n 'user/signup/', views.signUp), path('user/login/', views.logIn), path(\n 'user/logout/', views.logOut), path('user/addInvoice/', views.\n addInvoice), path('pets/', views.pets), path('pets/search/', views.\n searchPet), path('pets/addFond/<int:id>', views.addFond), path(\n 'pets/fond/<pet>/', views.getFond), path('pets/delete/', views.delPet),\n path('invoice/', views.invoice)]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom petsApp import views\nurlpatterns = [path('user/<int:id>/', views.getUser), path('user/addImage/',\n views.addImage), path('user/getImage/<int:id>/', views.getImage), path(\n 'user/signup/', views.signUp), path('user/login/', views.logIn), path(\n 'user/logout/', views.logOut), path('user/addInvoice/', views.\n addInvoice), path('pets/', views.pets), path('pets/search/', views.\n searchPet), path('pets/addFond/<int:id>', views.addFond), path(\n 'pets/fond/<pet>/', views.getFond), path('pets/delete/', views.delPet),\n path('invoice/', views.invoice)]\n",
"step-4": "\nfrom django.contrib import admin\nfrom django.urls import path\nfrom petsApp import views\n\nurlpatterns = [\n path('user/<int:id>/', views.getUser),\n path('user/addImage/', views.addImage),\n path('user/getImage/<int:id>/', views.getImage),\n path('user/signup/', views.signUp),\n path('user/login/', views.logIn),\n path('user/logout/', views.logOut),\n path('user/addInvoice/', views.addInvoice),\n path('pets/', views.pets), # toto\n path('pets/search/', views.searchPet), # toto\n path('pets/addFond/<int:id>', views.addFond),\n path('pets/fond/<pet>/', views.getFond),\n path('pets/delete/', views.delPet),\n path('invoice/', views.invoice),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from numpy import *
from numpy.linalg import *
preco = array(eval(input("Alimentos: ")))
alimento = array([[2, 1, 4],
                  [1, 2, 0],
                  [2, 3, 2]])
r = dot(inv(alimento), preco.T)  # solve the linear system alimento @ r = preco for r
print("estafilococo: ", round(r[0], 1))
print("salmonela: ", round(r[1], 1))
print("coli: ", round(r[2], 1))
if r[0] == min(r):
print("estafilococo")
elif r[1] == min(r):
print("salmonela")
elif r[2] == min(r):
print("coli")
|
normal
|
{
"blob_id": "0f3e12f35cc29a71be5b8e6d367908e31c200c38",
"index": 3896,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('estafilococo: ', round(r[0], 1))\nprint('salmonela: ', round(r[1], 1))\nprint('coli: ', round(r[2], 1))\nif r[0] == min(r):\n print('estafilococo')\nelif r[1] == min(r):\n print('salmonela')\nelif r[2] == min(r):\n print('coli')\n",
"step-3": "<mask token>\npreco = array(eval(input('Alimentos: ')))\nalimento = array([[2, 1, 4], [1, 2, 0], [2, 3, 2]])\nr = dot(inv(alimento), preco.T)\nprint('estafilococo: ', round(r[0], 1))\nprint('salmonela: ', round(r[1], 1))\nprint('coli: ', round(r[2], 1))\nif r[0] == min(r):\n print('estafilococo')\nelif r[1] == min(r):\n print('salmonela')\nelif r[2] == min(r):\n print('coli')\n",
"step-4": "from numpy import *\nfrom numpy.linalg import *\npreco = array(eval(input('Alimentos: ')))\nalimento = array([[2, 1, 4], [1, 2, 0], [2, 3, 2]])\nr = dot(inv(alimento), preco.T)\nprint('estafilococo: ', round(r[0], 1))\nprint('salmonela: ', round(r[1], 1))\nprint('coli: ', round(r[2], 1))\nif r[0] == min(r):\n print('estafilococo')\nelif r[1] == min(r):\n print('salmonela')\nelif r[2] == min(r):\n print('coli')\n",
"step-5": "from numpy import *\nfrom numpy.linalg import*\n\npreco = array(eval(input(\"Alimentos: \")))\nalimento = array([[ 2, 1 ,4 ],\n\t\t\t\t\t\t[1 , 2 , 0], \n\t\t\t\t\t\t[2 , 3 , 2 ]])\n\nr = dot(inv(alimento),preco.T) # \n\n\nprint(\"estafilococo: \", round(r[0] , 1))\nprint(\"salmonela: \", round(r[1], 1))\nprint(\"coli: \", round(r[2], 1))\n\n\nif r[0] == min(r):\n print(\"estafilococo\")\nelif r[1] == min(r):\n print(\"salmonela\")\nelif r[2]== min(r) :\n print(\"coli\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import os
import numpy as np
import math
sys.path.append("../")
from sir.improveagent import *
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
#from sklearn.neighbors import BallTree
from scipy.spatial import KDTree
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist
import networkx as nx
p = Person()
def run_Simulation2(k,N=100,T=10,start = 1,p=0.5,q=0.08,startcenter = False,startcorner=False):
"""
    run the agent-based SIR simulation for N agents over T steps and plot the susceptible/infected/removed fractions
"""
recover = [0]
infect = [start]
suspect = [N-start]
pop = [Person() for i in range(N)]
    # infect the first `start` individuals before the simulation begins
    for i in range(start):
        pop[i].get_infected()
    if startcenter:
        resetcenter(start, pop)
    if startcorner:
        resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
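        # build a k-d tree over the current positions so infected agents can find neighbours within radius q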
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
                    # query_ball_point returns a list of index lists for a 2-D query; inds[0] holds this agent's neighbours
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand()< k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [i/N for i in recover]
newsuspect = [s/N for s in suspect]
newinfect = [i/N for i in infect]
plt.plot(range(T+1),newrecover,label = "r: percentage of removed ")
plt.plot(range(T+1),newsuspect,label = "s: percentage of susceptible")
plt.plot(range(T+1),newinfect,label = "i: percentage of infected")
plt.xlabel("T")
plt.ylabel("percentage")
plt.title("Percentage of Population, Discrete")
plt.legend()
plt.show()
# run one simulation here, using the default values of p and q
run_Simulation2(0.6,N=20000,T = 30,start=10)
def checkinfectb(k,N,T,start=1,p=0.5,q=0.08,startcenter = False,startcorner=False):
"""
    run the simulation without plotting and return [fraction ever infected, fraction currently infected]
"""
recover = [0]
infect = [start]
suspect = [N-start]
pop = [Person() for i in range(N)]
np.random.seed(10)
    for i in range(start):
        pop[i].get_infected()
    if startcenter:
        resetcenter(start, pop)
    if startcorner:
        resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand()<k:
pop[j].get_recovered()
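    # [fraction ever infected (infected + recovered), fraction currently infected]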
return np.array([(count_infect(pop)+count_recover(pop))/N,count_infect(pop)/N])
def plotcenterrange():
"""
    show how the total infected fraction changes with p when the outbreak starts at the centre
"""
plist1 = np.arange(0.02,0.1,0.02)
plist = np.arange(0.1,1,0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])
plt.plot(np.hstack((plist1,plist)),infectlist)
plt.title("centerplot")
plt.xlabel("p")
plt.ylabel("total number of individuals infected")
plt.title("Total Number of Individuals Infected vs p")
plt.show()
plotcenterrange()
"""
def plotrandomcornerrange():
plist1 = np.arange(0.02,0.1,0.02)
plist = np.arange(0.1,1,0.1)
infectlist = []
infectlist2 = []
infectlist3 = []
for i in plist1:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])
infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])
infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])
infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])
infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])
plt.plot(np.hstack((plist1,plist)),infectlist,label = "corner")
plt.plot(np.hstack((plist1,plist)),infectlist2,label = "random")
plt.plot(np.hstack((plist1,plist)),infectlist3,label = "center")
plt.title("Change from random corner center")
plt.xlabel("change of p")
plt.ylabel("change of total infected people")
plt.legend()
plt.show()
"""
#plotrandomcornerrange()
#no need for us to use this function
valuecorner = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0]
valuecenter = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0]
valuerandom = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)))[0]
print("p = 0.05, starting randomly, the total infected number is "+ str(valuerandom))
print("p = 0.05, starting from corner, the total infected number is "+ str(valuecorner))
print("p = 0.05, starting from center, the total infected number is "+ str(valuecenter))
|
normal
|
{
"blob_id": "92317996f884befd646138cd3a3dc3f8345679f4",
"index": 2122,
"step-1": "<mask token>\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\n<mask token>\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../')\n<mask token>\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\nrun_Simulation2(0.6, N=20000, T=30, start=10)\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\nplotcenterrange()\n<mask token>\nprint('p = 0.05, starting randomly, the total infected number is ' + str(\n valuerandom))\nprint('p = 0.05, starting from corner, the total infected number is ' + str\n (valuecorner))\nprint('p = 0.05, starting from center, the total infected number is ' + str\n (valuecenter))\n",
"step-3": "<mask token>\nsys.path.append('../')\n<mask token>\np = Person()\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\nrun_Simulation2(0.6, N=20000, T=30, start=10)\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\nplotcenterrange()\n<mask token>\nvaluecorner = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcorner=True)[0]\nvaluecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcenter=True)[0]\nvaluerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * 
math.pi)))[0]\nprint('p = 0.05, starting randomly, the total infected number is ' + str(\n valuerandom))\nprint('p = 0.05, starting from corner, the total infected number is ' + str\n (valuecorner))\nprint('p = 0.05, starting from center, the total infected number is ' + str\n (valuecenter))\n",
"step-4": "import sys\nimport os\nimport numpy as np\nimport math\nsys.path.append('../')\nfrom sir.improveagent import *\nimport numpy as np\nimport numpy.linalg as la\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import KDTree\nfrom scipy.spatial import cKDTree\nfrom scipy.spatial.distance import pdist\nimport networkx as nx\np = Person()\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\nrun_Simulation2(0.6, N=20000, T=30, start=10)\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\nplotcenterrange()\n<mask token>\nvaluecorner = checkinfectb(0.5, 20000, 30, 
200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcorner=True)[0]\nvaluecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcenter=True)[0]\nvaluerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)))[0]\nprint('p = 0.05, starting randomly, the total infected number is ' + str(\n valuerandom))\nprint('p = 0.05, starting from corner, the total infected number is ' + str\n (valuecorner))\nprint('p = 0.05, starting from center, the total infected number is ' + str\n (valuecenter))\n",
"step-5": "import sys\nimport os\nimport numpy as np\nimport math\nsys.path.append(\"../\")\nfrom sir.improveagent import *\nimport numpy as np\nimport numpy.linalg as la\nimport matplotlib.pyplot as plt\n#from sklearn.neighbors import BallTree\nfrom scipy.spatial import KDTree\nfrom scipy.spatial import cKDTree\nfrom scipy.spatial.distance import pdist\nimport networkx as nx\n\np = Person()\n\ndef run_Simulation2(k,N=100,T=10,start = 1,p=0.5,q=0.08,startcenter = False,startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N-start]\n pop = [Person() for i in range(N)]\n ##we need to change the code for the case start people infected\n for i in range(start):\n pop[i].get_infected();\n if(startcenter):\n resetcenter(start,pop)\n if(startcorner):\n resetcorner(start,pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n #may have problem here\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand()< k:\n pop[j].get_recovered()\n\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [i/N for i in recover]\n newsuspect = [s/N for s in suspect]\n newinfect = [i/N for i in infect]\n plt.plot(range(T+1),newrecover,label = \"r: percentage of removed \")\n plt.plot(range(T+1),newsuspect,label = \"s: percentage of susceptible\")\n plt.plot(range(T+1),newinfect,label = \"i: percentage of infected\")\n plt.xlabel(\"T\")\n plt.ylabel(\"percentage\")\n plt.title(\"Percentage of Population, Discrete\")\n plt.legend()\n plt.show()\n\n\n#We run a simulation here,use the default value of p and q\nrun_Simulation2(0.6,N=20000,T = 30,start=10)\n\ndef checkinfectb(k,N,T,start=1,p=0.5,q=0.08,startcenter = False,startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N-start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected();\n if(startcenter):\n resetcenter(start,pop)\n if(startcorner):\n resetcorner(start,pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand()<k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop)+count_recover(pop))/N,count_infect(pop)/N])\n\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02,0.1,0.02)\n plist = np.arange(0.1,1,0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n plt.plot(np.hstack((plist1,plist)),infectlist)\n plt.title(\"centerplot\")\n plt.xlabel(\"p\")\n plt.ylabel(\"total number of individuals infected\")\n 
plt.title(\"Total Number of Individuals Infected vs p\")\n plt.show()\n\nplotcenterrange()\n\n\n\n\"\"\"\ndef plotrandomcornerrange():\n\n plist1 = np.arange(0.02,0.1,0.02)\n plist = np.arange(0.1,1,0.1)\n infectlist = []\n infectlist2 = []\n infectlist3 = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])\n infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])\n infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])\n infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])\n infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])\n plt.plot(np.hstack((plist1,plist)),infectlist,label = \"corner\")\n plt.plot(np.hstack((plist1,plist)),infectlist2,label = \"random\")\n plt.plot(np.hstack((plist1,plist)),infectlist3,label = \"center\")\n plt.title(\"Change from random corner center\")\n plt.xlabel(\"change of p\")\n plt.ylabel(\"change of total infected people\")\n plt.legend()\n plt.show()\n\n\"\"\"\n#plotrandomcornerrange()\n#no need for us to use this function\n\nvaluecorner = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0]\nvaluecenter = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0]\nvaluerandom = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)))[0]\nprint(\"p = 0.05, starting randomly, the total infected number is \"+ str(valuerandom))\nprint(\"p = 0.05, starting from corner, the total infected number is \"+ str(valuecorner))\nprint(\"p = 0.05, starting from center, the total infected number is \"+ str(valuecenter))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import logging
import argparse
import getpass
import errno
import re
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import dns.resolver
class Mail(object):
def __init__(self, recipient=None, sender=None, subject=None, body=None):
self.recipient = recipient
self.sender = sender or '{}@example.com'.format(getpass.getuser())
self.subject = subject or 'Sir! My sir!'
self.body = body or 'A message from their majesty.'
self.verbose = False
@property
def domain(self):
m = re.match(r'.+@(\w+\.\w+)', self.recipient)
if m:
return m.group(1)
else:
raise ValueError('Unable to get recipient domain')
@property
def message(self):
m = MIMEMultipart('alternative')
m['Subject'] = self.subject
m['From'] = self.sender
m['To'] = self.recipient
m.attach(MIMEText(self.body, 'plain'))
return m
def send(self):
"""
Sends an email to a single recipient straight to his MTA.
Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.
"""
answers = dns.resolver.query(self.domain, 'MX')
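        # attempt delivery through every advertised MX host for the recipient's domain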
try:
for answer in answers:
ex = answer.exchange.to_text()
server = smtplib.SMTP(ex)
server.set_debuglevel(self.verbose)
server.sendmail(self.sender, [self.recipient], self.message.as_string())
server.quit()
except OSError as e:
if e.errno is errno.ENETUNREACH:
print('Looks like port 25 is blocked')
raise e
class App(object):
def run(self):
mail = Mail()
self.parse(mail)
mail.send()
@classmethod
def parse(cls, mail):
parser = argparse.ArgumentParser(prog='lumpy', description=mail.send.__doc__)
arg = parser.add_argument
arg('--from', '-f', nargs='?', dest='sender')
arg('recipient')
arg('--subject', '-s', nargs='?')
arg('--body', '-b', nargs='?')
arg('--verbose', '-v', action='store_true')
parser.parse_args(namespace=mail)
if __name__ == "__main__":
App().run()
|
normal
|
{
"blob_id": "3a678f9b5274f008a510a23b2358fe2a506c3221",
"index": 4061,
"step-1": "<mask token>\n\n\nclass Mail(object):\n <mask token>\n <mask token>\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n <mask token>\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Mail(object):\n <mask token>\n\n @property\n def domain(self):\n m = re.match('.+@(\\\\w+\\\\.\\\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n <mask token>\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Mail(object):\n <mask token>\n\n @property\n def domain(self):\n m = re.match('.+@(\\\\w+\\\\.\\\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n\n def send(self):\n \"\"\"\n Sends an email to a single recipient straight to his MTA.\n Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.\n \"\"\"\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message\n .as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Mail(object):\n\n def __init__(self, recipient=None, sender=None, subject=None, body=None):\n self.recipient = recipient\n self.sender = sender or '{}@example.com'.format(getpass.getuser())\n self.subject = subject or 'Sir! My sir!'\n self.body = body or 'A message from their majesty.'\n self.verbose = False\n\n @property\n def domain(self):\n m = re.match('.+@(\\\\w+\\\\.\\\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n\n def send(self):\n \"\"\"\n Sends an email to a single recipient straight to his MTA.\n Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.\n \"\"\"\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message\n .as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-5": "import logging\nimport argparse\nimport getpass\nimport errno\nimport re\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nimport dns.resolver\n\nclass Mail(object):\n\n def __init__(self, recipient=None, sender=None, subject=None, body=None):\n self.recipient = recipient\n self.sender = sender or '{}@example.com'.format(getpass.getuser())\n self.subject = subject or 'Sir! My sir!'\n self.body = body or 'A message from their majesty.'\n self.verbose = False\n\n @property\n def domain(self):\n m = re.match(r'.+@(\\w+\\.\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n\n def send(self):\n \"\"\"\n Sends an email to a single recipient straight to his MTA.\n Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.\n \"\"\"\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message.as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.send.__doc__)\n arg = parser.add_argument\n\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n \n parser.parse_args(namespace=mail)\n\n\nif __name__ == \"__main__\":\n App().run()\n",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
import json
import jieba
import util
from pypinyin import pinyin, Style
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
        return util.sanitizeName(self.artistName) + "-" + util.sanitizeName(self.songName)
def storeToFileSystem(self, filename, append):
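        # ("w+","a+")[append] selects append mode when `append` is truthy, otherwise truncate and write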
file = open(filename, ("w+","a+")[append],encoding="utf8")
json.dump(self.__dict__, file, indent=4, ensure_ascii=False)
file.close()
def write(self):
file = open(self.getSongName(), "w+")
file.write(self.getLyric())
file.close()
|
normal
|
{
"blob_id": "fa3cec0781b9ca5c1d99a7500748104d7cdce631",
"index": 130,
"step-1": "<mask token>\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n <mask token>\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n <mask token>\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-2": "<mask token>\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n <mask token>\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-3": "<mask token>\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n\n def storeToFileSystem(self, filename, append):\n file = open(filename, ('w+', 'a+')[append], encoding='utf8')\n json.dump(self.__dict__, file, indent=4, ensure_ascii=False)\n file.close()\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-4": "import json\nimport jieba\nimport util\nfrom pypinyin import pinyin, Style\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n\n def storeToFileSystem(self, filename, append):\n file = open(filename, ('w+', 'a+')[append], encoding='utf8')\n json.dump(self.__dict__, file, indent=4, ensure_ascii=False)\n file.close()\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-5": "import json\nimport jieba\nimport util\nfrom pypinyin import pinyin, Style\n\nclass Song:\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n def getArtistName(self):\n return self.artistName\n def getLyric(self):\n return self.lyric\n def getName(self):\n return util.sanitizeName(self.artistName)+\"-\"+ util.sanitizeName(self.songName)\n\n def storeToFileSystem(self, filename, append):\n file = open(filename, (\"w+\",\"a+\")[append],encoding=\"utf8\")\n json.dump(self.__dict__, file, indent=4, ensure_ascii=False)\n file.close()\n\n def write(self):\n file = open(self.getSongName(), \"w+\")\n file.write(self.getLyric())\n file.close()\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
import discord
from discord.ext import commands
import datetime
from discord.utils import get
from discord import User
class Sinner(commands.Converter):
async def convert(self, ctx, argument):
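        # resolve the argument to a Member; anyone who can manage messages counts as staff and cannot be targeted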
argument = await commands.MemberConverter().convert(ctx, argument)
permission = argument.guild_permissions.manage_messages
if not permission:
return argument
else:
raise commands.BadArgument("You cannot punish other staff members")
class Redeemed(commands.Converter):
async def convert(self, ctx, argument):
argument = await commands.MemberConverter().convert(ctx, argument)
muted = discord.utils.get(ctx.guild.roles, name="Muted")
if muted in argument.roles:
return argument
else:
raise commands.BadArgument("The user was not muted.")
async def mute(ctx, user, reason="No reason"):
role = discord.utils.get(ctx.guild.roles, name="Muted")
if not role:
try:
muted = await ctx.guild.create_role(name="Muted", reason="To use for muting")
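            # deny the new role from sending or reading messages in every existing channel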
for channel in ctx.guild.channels:
await channel.set_permissions(muted, send_messages=False,
read_message_history=False,
read_messages=False)
except discord.Forbidden:
return await ctx.send("I have no permissions to make a muted role")
await user.add_roles(muted)
await ctx.send(f"{user.mention} has been muted for {reason}")
else:
await user.add_roles(role)
await ctx.send(f"{user.mention} has been muted for {reason}")
channel = ctx.bot.get_channel(718865797006753892)
await channel.send(f"{user.mention}, welcome to the bad kids club.")
class Moderation(commands.Cog):
"""Moderation Commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(name="ban")
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member, *, reason="No reason"):
"""Bans someone"""
if member == None or member == ctx.message.author:
await ctx.send("You cannot ban yourself!")
return
try:
            await member.ban(reason=reason)
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.")
else:
embed = discord.Embed(title=f"`{ctx.author}` banned {member}", colour=member.color, timestamp=datetime.datetime.utcnow())
embed.add_field(name="● Details:", value=f" - Reason: {reason}")
embed.set_footer(icon_url=f"{ctx.author.avatar_url}", text=f"{ctx.author.top_role.name} ")
await ctx.send(embed=embed)
print(ctx.author.name, 'used the command ban')
@commands.command()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, member, *, reason="No reason"):
print("unbanned")
if member == None or member == ctx.message.author:
await ctx.send("You cannot unban yourself!")
return
try:
member = await self.bot.fetch_user(int(member))
await ctx.guild.unban(member, reason=reason)
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.")
else:
await ctx.send(f"`{member}` was unbanned by **{ctx.author.name}**.")
print(ctx.author.name, 'used the command unban')
@commands.command(name="kick")
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason="No reason"):
"""Kicks someone"""
if member == None or member == ctx.message.author:
await ctx.send("You cannot kick yourself!")
return
try:
await member.kick(reason=reason)
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.")
else:
embed = discord.Embed(title=f"`{ctx.author}` kicked {member}", colour=member.color, timestamp=datetime.datetime.utcnow())
embed.add_field(name="● Details:", value=f" - Reason: {reason}")
embed.set_footer(icon_url=f"{ctx.author.avatar_url}", text=f"{ctx.author.top_role.name} ")
await ctx.send(embed=embed)
print(ctx.author.name, 'used the command kick')
@commands.command(name="clear")
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount: int):
"""Clears messages."""
channel = ctx.channel
try:
await channel.purge(limit=amount+1)
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.")
else:
await ctx.send(f"{amount} messages deleted.")
@clear.error
async def clear_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to specify an amount of messages, i can't purge air...")
if isinstance(error, commands.BadArgument):
await ctx.send("Give me a valid number.")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
raise error
@kick.error
async def kick_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who to kick.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
raise error
@ban.error
async def ban_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who to ban.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?.")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
raise error
@commands.command()
async def mute(self, ctx, user: Sinner, reason=None):
"""Mutes a user."""
        if user is None or user == ctx.message.author:
await ctx.send("You cannot mute yourself!")
return
await mute(ctx, user, reason or "treason")
@commands.command()
async def unmute(self, ctx, user: Redeemed):
"""Unmutes a muted user"""
        if user is None or user == ctx.message.author:
await ctx.send("You cannot unmute yourself!")
return
await user.remove_roles(discord.utils.get(ctx.guild.roles, name="Muted"))
await ctx.send(f"{user.mention} has been unmuted")
@mute.error
async def mute_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who do you want to mute.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")
@unmute.error
async def unmute_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who do you want to unmute.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")
@unban.error
async def unban_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who do you want to unban.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")
def setup(bot):
bot.add_cog(Moderation(bot))
|
normal
|
{
"blob_id": "16cd89a43a1985276bd14d85ad8ddb990c4d82c3",
"index": 6136,
"step-1": "<mask token>\n\n\nclass Redeemed(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n muted = discord.utils.get(ctx.guild.roles, name='Muted')\n if muted in argument.roles:\n return argument\n else:\n raise commands.BadArgument('The user was not muted.')\n\n\n<mask token>\n\n\nclass Moderation(commands.Cog):\n \"\"\"Moderation Commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='ban')\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Bans someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot ban yourself!')\n return\n try:\n memberid = await self.bot.fetch_user(int(member))\n await member.ban(reason=reason) or await memberid.ban(reason=reason\n )\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` banned {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command ban')\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, *, reason='No reason'):\n print('unbanned')\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unban yourself!')\n return\n try:\n member = await self.bot.fetch_user(int(member))\n await ctx.guild.unban(member, reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'`{member}` was unbanned by **{ctx.author.name}**.'\n )\n print(ctx.author.name, 'used the command unban')\n\n @commands.command(name='kick')\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Kicks someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot kick yourself!')\n return\n try:\n await member.kick(reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` kicked {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command kick')\n\n @commands.command(name='clear')\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: int):\n \"\"\"Clears messages.\"\"\"\n channel = ctx.channel\n try:\n await channel.purge(limit=amount + 1)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `MANAGE_MESSAGES` to do this. 
Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'{amount} messages deleted.')\n\n @clear.error\n async def clear_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n \"You need to specify an amount of messages, i can't purge air...\"\n )\n if isinstance(error, commands.BadArgument):\n await ctx.send('Give me a valid number.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @kick.error\n async def kick_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to kick.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @ban.error\n async def ban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to ban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @commands.command()\n async def mute(self, ctx, user: Sinner, reason=None):\n \"\"\"Mutes a user.\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot mute yourself!')\n return\n await mute(ctx, user, reason or 'treason')\n\n @commands.command()\n async def unmute(self, ctx, user: Redeemed):\n \"\"\"Unmutes a muted user\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unmute yourself!')\n return\n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\n 'Muted'))\n await ctx.send(f'{user.mention} has been unmuted')\n\n @mute.error\n async def mute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to mute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unmute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sinner(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n permission = argument.guild_permissions.manage_messages\n if not permission:\n return argument\n else:\n raise commands.BadArgument('You cannot punish other staff members')\n\n\nclass Redeemed(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n muted = discord.utils.get(ctx.guild.roles, name='Muted')\n if muted in argument.roles:\n return argument\n else:\n raise commands.BadArgument('The user was not muted.')\n\n\n<mask token>\n\n\nclass Moderation(commands.Cog):\n \"\"\"Moderation Commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='ban')\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Bans someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot ban yourself!')\n return\n try:\n memberid = await self.bot.fetch_user(int(member))\n await member.ban(reason=reason) or await memberid.ban(reason=reason\n )\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` banned {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command ban')\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, *, reason='No reason'):\n print('unbanned')\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unban yourself!')\n return\n try:\n member = await self.bot.fetch_user(int(member))\n await ctx.guild.unban(member, reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'`{member}` was unbanned by **{ctx.author.name}**.'\n )\n print(ctx.author.name, 'used the command unban')\n\n @commands.command(name='kick')\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Kicks someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot kick yourself!')\n return\n try:\n await member.kick(reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `KICK_MEMBERS` to do this. 
Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` kicked {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command kick')\n\n @commands.command(name='clear')\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: int):\n \"\"\"Clears messages.\"\"\"\n channel = ctx.channel\n try:\n await channel.purge(limit=amount + 1)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'{amount} messages deleted.')\n\n @clear.error\n async def clear_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n \"You need to specify an amount of messages, i can't purge air...\"\n )\n if isinstance(error, commands.BadArgument):\n await ctx.send('Give me a valid number.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @kick.error\n async def kick_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to kick.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @ban.error\n async def ban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to ban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @commands.command()\n async def mute(self, ctx, user: Sinner, reason=None):\n \"\"\"Mutes a user.\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot mute yourself!')\n return\n await mute(ctx, user, reason or 'treason')\n\n @commands.command()\n async def unmute(self, ctx, user: Redeemed):\n \"\"\"Unmutes a muted user\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unmute yourself!')\n return\n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\n 'Muted'))\n await ctx.send(f'{user.mention} has been unmuted')\n\n @mute.error\n async def mute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to mute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unmute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n 
f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Sinner(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n permission = argument.guild_permissions.manage_messages\n if not permission:\n return argument\n else:\n raise commands.BadArgument('You cannot punish other staff members')\n\n\nclass Redeemed(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n muted = discord.utils.get(ctx.guild.roles, name='Muted')\n if muted in argument.roles:\n return argument\n else:\n raise commands.BadArgument('The user was not muted.')\n\n\nasync def mute(ctx, user, reason='No reason'):\n role = discord.utils.get(ctx.guild.roles, name='Muted')\n if not role:\n try:\n muted = await ctx.guild.create_role(name='Muted', reason=\n 'To use for muting')\n for channel in ctx.guild.channels:\n await channel.set_permissions(muted, send_messages=False,\n read_message_history=False, read_messages=False)\n except discord.Forbidden:\n return await ctx.send('I have no permissions to make a muted role')\n await user.add_roles(muted)\n await ctx.send(f'{user.mention} has been muted for {reason}')\n else:\n await user.add_roles(role)\n await ctx.send(f'{user.mention} has been muted for {reason}')\n channel = ctx.bot.get_channel(718865797006753892)\n await channel.send(f'{user.mention}, welcome to the bad kids club.')\n\n\nclass Moderation(commands.Cog):\n \"\"\"Moderation Commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='ban')\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Bans someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot ban yourself!')\n return\n try:\n memberid = await self.bot.fetch_user(int(member))\n await member.ban(reason=reason) or await memberid.ban(reason=reason\n )\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` banned {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command ban')\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, *, reason='No reason'):\n print('unbanned')\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unban yourself!')\n return\n try:\n member = await self.bot.fetch_user(int(member))\n await ctx.guild.unban(member, reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. 
Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'`{member}` was unbanned by **{ctx.author.name}**.'\n )\n print(ctx.author.name, 'used the command unban')\n\n @commands.command(name='kick')\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Kicks someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot kick yourself!')\n return\n try:\n await member.kick(reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` kicked {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command kick')\n\n @commands.command(name='clear')\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: int):\n \"\"\"Clears messages.\"\"\"\n channel = ctx.channel\n try:\n await channel.purge(limit=amount + 1)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'{amount} messages deleted.')\n\n @clear.error\n async def clear_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n \"You need to specify an amount of messages, i can't purge air...\"\n )\n if isinstance(error, commands.BadArgument):\n await ctx.send('Give me a valid number.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @kick.error\n async def kick_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to kick.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @ban.error\n async def ban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to ban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @commands.command()\n async def mute(self, ctx, user: Sinner, reason=None):\n \"\"\"Mutes a user.\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot mute yourself!')\n return\n await mute(ctx, user, reason or 'treason')\n\n @commands.command()\n async def unmute(self, ctx, user: Redeemed):\n \"\"\"Unmutes a muted user\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unmute yourself!')\n return\n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\n 'Muted'))\n await ctx.send(f'{user.mention} has been unmuted')\n\n @mute.error\n async def mute_error(self, ctx, error):\n if isinstance(error, 
commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to mute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unmute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n\ndef setup(bot):\n bot.add_cog(Moderation(bot))\n",
"step-4": "import discord\nfrom discord.ext import commands\nimport datetime\nfrom discord.utils import get\nfrom discord import User\n\n\nclass Sinner(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n permission = argument.guild_permissions.manage_messages\n if not permission:\n return argument\n else:\n raise commands.BadArgument('You cannot punish other staff members')\n\n\nclass Redeemed(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n muted = discord.utils.get(ctx.guild.roles, name='Muted')\n if muted in argument.roles:\n return argument\n else:\n raise commands.BadArgument('The user was not muted.')\n\n\nasync def mute(ctx, user, reason='No reason'):\n role = discord.utils.get(ctx.guild.roles, name='Muted')\n if not role:\n try:\n muted = await ctx.guild.create_role(name='Muted', reason=\n 'To use for muting')\n for channel in ctx.guild.channels:\n await channel.set_permissions(muted, send_messages=False,\n read_message_history=False, read_messages=False)\n except discord.Forbidden:\n return await ctx.send('I have no permissions to make a muted role')\n await user.add_roles(muted)\n await ctx.send(f'{user.mention} has been muted for {reason}')\n else:\n await user.add_roles(role)\n await ctx.send(f'{user.mention} has been muted for {reason}')\n channel = ctx.bot.get_channel(718865797006753892)\n await channel.send(f'{user.mention}, welcome to the bad kids club.')\n\n\nclass Moderation(commands.Cog):\n \"\"\"Moderation Commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='ban')\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Bans someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot ban yourself!')\n return\n try:\n memberid = await self.bot.fetch_user(int(member))\n await member.ban(reason=reason) or await memberid.ban(reason=reason\n )\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` banned {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command ban')\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, *, reason='No reason'):\n print('unbanned')\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unban yourself!')\n return\n try:\n member = await self.bot.fetch_user(int(member))\n await ctx.guild.unban(member, reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. 
Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'`{member}` was unbanned by **{ctx.author.name}**.'\n )\n print(ctx.author.name, 'used the command unban')\n\n @commands.command(name='kick')\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Kicks someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot kick yourself!')\n return\n try:\n await member.kick(reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` kicked {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command kick')\n\n @commands.command(name='clear')\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: int):\n \"\"\"Clears messages.\"\"\"\n channel = ctx.channel\n try:\n await channel.purge(limit=amount + 1)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'{amount} messages deleted.')\n\n @clear.error\n async def clear_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n \"You need to specify an amount of messages, i can't purge air...\"\n )\n if isinstance(error, commands.BadArgument):\n await ctx.send('Give me a valid number.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @kick.error\n async def kick_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to kick.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @ban.error\n async def ban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to ban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @commands.command()\n async def mute(self, ctx, user: Sinner, reason=None):\n \"\"\"Mutes a user.\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot mute yourself!')\n return\n await mute(ctx, user, reason or 'treason')\n\n @commands.command()\n async def unmute(self, ctx, user: Redeemed):\n \"\"\"Unmutes a muted user\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unmute yourself!')\n return\n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\n 'Muted'))\n await ctx.send(f'{user.mention} has been unmuted')\n\n @mute.error\n async def mute_error(self, ctx, error):\n if isinstance(error, 
commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to mute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unmute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n\ndef setup(bot):\n bot.add_cog(Moderation(bot))\n",
"step-5": "import discord\nfrom discord.ext import commands\nimport datetime\nfrom discord.utils import get\nfrom discord import User\n\nclass Sinner(commands.Converter):\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n permission = argument.guild_permissions.manage_messages \n if not permission:\n return argument \n else:\n raise commands.BadArgument(\"You cannot punish other staff members\") \n\n\nclass Redeemed(commands.Converter):\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n muted = discord.utils.get(ctx.guild.roles, name=\"Muted\") \n if muted in argument.roles:\n return argument\n else:\n raise commands.BadArgument(\"The user was not muted.\") \n \n\nasync def mute(ctx, user, reason=\"No reason\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\") \n if not role: \n try: \n muted = await ctx.guild.create_role(name=\"Muted\", reason=\"To use for muting\")\n for channel in ctx.guild.channels: \n await channel.set_permissions(muted, send_messages=False,\n read_message_history=False,\n read_messages=False)\n except discord.Forbidden:\n return await ctx.send(\"I have no permissions to make a muted role\")\n await user.add_roles(muted) \n await ctx.send(f\"{user.mention} has been muted for {reason}\")\n else:\n await user.add_roles(role) \n await ctx.send(f\"{user.mention} has been muted for {reason}\")\n channel = ctx.bot.get_channel(718865797006753892)\n await channel.send(f\"{user.mention}, welcome to the bad kids club.\")\n\nclass Moderation(commands.Cog):\n \"\"\"Moderation Commands\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"ban\")\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason=\"No reason\"):\n \"\"\"Bans someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot ban yourself!\")\n return\n try:\n memberid = await self.bot.fetch_user(int(member)) \n await member.ban(reason=reason) or await memberid.ban(reason=reason)\n except discord.Forbidden:\n await ctx.send(f\"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.\") \n else:\n embed = discord.Embed(title=f\"`{ctx.author}` banned {member}\", colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name=\"● Details:\", value=f\" - Reason: {reason}\")\n embed.set_footer(icon_url=f\"{ctx.author.avatar_url}\", text=f\"{ctx.author.top_role.name} \")\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command ban')\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, *, reason=\"No reason\"):\n print(\"unbanned\")\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unban yourself!\")\n return\n try:\n member = await self.bot.fetch_user(int(member))\n await ctx.guild.unban(member, reason=reason)\n except discord.Forbidden:\n await ctx.send(f\"It looks like i dont have the permission `BAN_MEMBERS` to do this. 
Please check my permissions and try running the command again.\")\n else:\n await ctx.send(f\"`{member}` was unbanned by **{ctx.author.name}**.\")\n print(ctx.author.name, 'used the command unban')\n\n @commands.command(name=\"kick\")\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason=\"No reason\"):\n \"\"\"Kicks someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot kick yourself!\")\n return\n try:\n await member.kick(reason=reason)\n except discord.Forbidden: \n await ctx.send(f\"It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.\")\n else:\n embed = discord.Embed(title=f\"`{ctx.author}` kicked {member}\", colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name=\"● Details:\", value=f\" - Reason: {reason}\")\n embed.set_footer(icon_url=f\"{ctx.author.avatar_url}\", text=f\"{ctx.author.top_role.name} \")\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command kick')\n\n\n @commands.command(name=\"clear\")\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: int):\n \"\"\"Clears messages.\"\"\"\n channel = ctx.channel\n try:\n await channel.purge(limit=amount+1)\n except discord.Forbidden:\n await ctx.send(f\"It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.\")\n else:\n await ctx.send(f\"{amount} messages deleted.\")\n\n @clear.error\n async def clear_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to specify an amount of messages, i can't purge air...\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Give me a valid number.\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permission to use this command.\") \n\n raise error \n\n @kick.error\n async def kick_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who to kick.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permission to use this command.\") \n\n raise error \n\n\n @ban.error\n async def ban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who to ban.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?.\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permission to use this command.\") \n\n raise error\n\n @commands.command()\n async def mute(self, ctx, user: Sinner, reason=None):\n \"\"\"Mutes a user.\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot mute yourself!\")\n return \n await mute(ctx, user, reason or \"treason\")\n\n @commands.command()\n async def unmute(self, ctx, user: Redeemed):\n \"\"\"Unmutes a muted user\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")\n\n\n @mute.error\n async def mute_error(self, ctx, error):\n if isinstance(error, 
commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who do you want to mute.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permissions to use this command.\")\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who do you want to unmute.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permissions to use this command.\")\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who do you want to unban.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permissions to use this command.\")\n\ndef setup(bot):\n bot.add_cog(Moderation(bot))\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
# https://py.checkio.org/blog/design-patterns-part-1/
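# Factory-style dispatch: Image.open_file() selects an opener class based on the file extension.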
class ImageOpener(object):
@staticmethod
def open(filename):
raise NotImplementedError()
class PNGImageOpener(ImageOpener):
@staticmethod
def open(filename):
print('PNG: open with Paint')
class JPEGImageOpener(ImageOpener):
@staticmethod
def open(filename):
print('JPG/JPEG: open with ImageViewer')
class SVGImageOpener(ImageOpener):
@staticmethod
def open(filename):
print('SVG: open with Illustrator')
class UnknownImageOpener(ImageOpener):
@staticmethod
def open(filename):
print("You don't hame program for %s extension" %filename.split('.')[-1].upper())
class Image(object):
@classmethod
def open_file(cls, filename):
ext = filename.split('.')[-1]
if ext == 'png':
opener = PNGImageOpener
elif ext in ('jpg', 'jpeg'):
opener = JPEGImageOpener
elif ext == 'svg':
opener = SVGImageOpener
else:
opener = UnknownImageOpener
bytearray = opener.open(filename)
return cls(bytearray, filename)
def __init__(self, byterange, filename):
self._byterange = byterange
self._filename = filename
Image.open_file('picture.png')
Image.open_file('picture.jpg')
Image.open_file('picture.svg')
Image.open_file('picture.raw')
|
normal
|
{
"blob_id": "c199b2f87b7a4ac820001dab13f24fdd287a1575",
"index": 3507,
"step-1": "<mask token>\n\n\nclass UnknownImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print(\"You don't hame program for %s extension\" % filename.split(\n '.')[-1].upper())\n\n\nclass Image(object):\n\n @classmethod\n def open_file(cls, filename):\n ext = filename.split('.')[-1]\n if ext == 'png':\n opener = PNGImageOpener\n elif ext in ('jpg', 'jpeg'):\n opener = JPEGImageOpener\n elif ext == 'svg':\n opener = SVGImageOpener\n else:\n opener = UnknownImageOpener\n bytearray = opener.open(filename)\n return cls(bytearray, filename)\n\n def __init__(self, byterange, filename):\n self._byterange = byterange\n self._filename = filename\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SVGImageOpener(ImageOpener):\n <mask token>\n\n\nclass UnknownImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print(\"You don't hame program for %s extension\" % filename.split(\n '.')[-1].upper())\n\n\nclass Image(object):\n\n @classmethod\n def open_file(cls, filename):\n ext = filename.split('.')[-1]\n if ext == 'png':\n opener = PNGImageOpener\n elif ext in ('jpg', 'jpeg'):\n opener = JPEGImageOpener\n elif ext == 'svg':\n opener = SVGImageOpener\n else:\n opener = UnknownImageOpener\n bytearray = opener.open(filename)\n return cls(bytearray, filename)\n\n def __init__(self, byterange, filename):\n self._byterange = byterange\n self._filename = filename\n\n\n<mask token>\n",
"step-3": "class ImageOpener(object):\n <mask token>\n\n\nclass PNGImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print('PNG: open with Paint')\n\n\nclass JPEGImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print('JPG/JPEG: open with ImageViewer')\n\n\nclass SVGImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print('SVG: open with Illustrator')\n\n\nclass UnknownImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print(\"You don't hame program for %s extension\" % filename.split(\n '.')[-1].upper())\n\n\nclass Image(object):\n\n @classmethod\n def open_file(cls, filename):\n ext = filename.split('.')[-1]\n if ext == 'png':\n opener = PNGImageOpener\n elif ext in ('jpg', 'jpeg'):\n opener = JPEGImageOpener\n elif ext == 'svg':\n opener = SVGImageOpener\n else:\n opener = UnknownImageOpener\n bytearray = opener.open(filename)\n return cls(bytearray, filename)\n\n def __init__(self, byterange, filename):\n self._byterange = byterange\n self._filename = filename\n\n\n<mask token>\n",
"step-4": "class ImageOpener(object):\n\n @staticmethod\n def open(filename):\n raise NotImplementedError()\n\n\nclass PNGImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print('PNG: open with Paint')\n\n\nclass JPEGImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print('JPG/JPEG: open with ImageViewer')\n\n\nclass SVGImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print('SVG: open with Illustrator')\n\n\nclass UnknownImageOpener(ImageOpener):\n\n @staticmethod\n def open(filename):\n print(\"You don't hame program for %s extension\" % filename.split(\n '.')[-1].upper())\n\n\nclass Image(object):\n\n @classmethod\n def open_file(cls, filename):\n ext = filename.split('.')[-1]\n if ext == 'png':\n opener = PNGImageOpener\n elif ext in ('jpg', 'jpeg'):\n opener = JPEGImageOpener\n elif ext == 'svg':\n opener = SVGImageOpener\n else:\n opener = UnknownImageOpener\n bytearray = opener.open(filename)\n return cls(bytearray, filename)\n\n def __init__(self, byterange, filename):\n self._byterange = byterange\n self._filename = filename\n\n\n<mask token>\n",
"step-5": "# https://py.checkio.org/blog/design-patterns-part-1/\n\nclass ImageOpener(object):\n @staticmethod\n def open(filename):\n raise NotImplementedError()\n\n\nclass PNGImageOpener(ImageOpener):\n @staticmethod\n def open(filename):\n print('PNG: open with Paint')\n\n\nclass JPEGImageOpener(ImageOpener):\n @staticmethod\n def open(filename):\n print('JPG/JPEG: open with ImageViewer')\n\n\nclass SVGImageOpener(ImageOpener):\n @staticmethod\n def open(filename):\n print('SVG: open with Illustrator')\n\n\nclass UnknownImageOpener(ImageOpener):\n @staticmethod\n def open(filename):\n print(\"You don't hame program for %s extension\" %filename.split('.')[-1].upper())\n\n\nclass Image(object):\n @classmethod\n def open_file(cls, filename):\n ext = filename.split('.')[-1]\n if ext == 'png':\n opener = PNGImageOpener\n elif ext in ('jpg', 'jpeg'):\n opener = JPEGImageOpener\n elif ext == 'svg':\n opener = SVGImageOpener\n else:\n opener = UnknownImageOpener\n bytearray = opener.open(filename)\n return cls(bytearray, filename)\n\n def __init__(self, byterange, filename):\n self._byterange = byterange\n self._filename = filename\n\n\nImage.open_file('picture.png')\nImage.open_file('picture.jpg')\nImage.open_file('picture.svg')\nImage.open_file('picture.raw')\n",
"step-ids": [
5,
6,
12,
13,
15
]
}
|
[
5,
6,
12,
13,
15
] |
from tkinter import *
import mathcalc as c
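# mathcalc is a small companion module (not shown here); it is assumed to expose
# calculate(a, b, op), applying the operator op ('+', '-', '*' or '/') to a and b.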
root= Tk()
root.title("CALCULATOR")
ent=Entry(root,width=35)
ent.grid(row=0,column=0,columnspan=3,padx=10,pady=10)
#ent.grid(row=0,column=0)
ch=''
num=ent.get()
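# ch stores the operator button pressed most recently; equals() splits the entry text on it.
# num is read once here, while the entry is still empty, and is not used afterwards.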
def clicked(num):
current=ent.get()
ent.delete(0,END)
ent.insert(0,str(current)+str(num))
def click_clear():
ent.delete(0,END)
def add():
global ch
ch='+'
clicked('+')
def subtract():
global ch
ch='-'
clicked('-')
def multiply():
global ch
ch='*'
clicked('*')
def divide():
global ch
ch='/'
clicked('/')
def equals():
f_num,s_num=ent.get().split(ch)
res=c.calculate(float(f_num),float(s_num),ch)
ent.delete(0,END)
ent.insert(0,res)
#buttons
but1=Button(root,text="1",padx=40,pady=20,command=lambda: clicked(1))
but2=Button(root,text="2",padx=40,pady=20,command=lambda: clicked(2))
but3=Button(root,text="3",padx=40,pady=20,command=lambda: clicked(3))
but4=Button(root,text="4",padx=40,pady=20,command=lambda: clicked(4))
but5=Button(root,text="5",padx=40,pady=20,command=lambda: clicked(5))
but6=Button(root,text="6",padx=40,pady=20,command=lambda: clicked(6))
but7=Button(root,text="7",padx=40,pady=20,command=lambda: clicked(7))
but8=Button(root,text="8",padx=40,pady=20,command=lambda: clicked(8))
but9=Button(root,text="9",padx=40,pady=20,command=lambda: clicked(9))
but0=Button(root,text="0",padx=40,pady=20,command=lambda: clicked(0))
but_plus=Button(root,text="+",padx=39,pady=20,command=add)
but_sub=Button(root,text="-",padx=40,pady=20,command=subtract)
but_mul=Button(root,text="*",padx=40,pady=20,command=multiply)
but_div=Button(root,text="/",padx=40,pady=20,command=divide)
but_eq=Button(root,text="=",padx=89,pady=20,command=equals)
but_clr=Button(root,text="C",padx=89,pady=20,command=click_clear)
#button place
but7.grid(row=1,column=0)
but8.grid(row=1,column=1)
but9.grid(row=1,column=2)
but4.grid(row=2,column=0)
but5.grid(row=2,column=1)
but6.grid(row=2,column=2)
but1.grid(row=3,column=0)
but2.grid(row=3,column=1)
but3.grid(row=3,column=2)
but0.grid(row=4,column=0)
but_plus.grid(row=5,column=0)
but_sub.grid(row=6,column=0)
but_mul.grid(row=6,column=1)
but_div.grid(row=6,column=2)
but_eq.grid(row=4,column=1,columnspan=2)
but_clr.grid(row=5,column=1,columnspan=2)
root.mainloop()
|
normal
|
{
"blob_id": "bdd9ebfa9a2f14d57efd527ca88032bfb0160a5e",
"index": 7504,
"step-1": "<mask token>\n\n\ndef clicked(num):\n current = ent.get()\n ent.delete(0, END)\n ent.insert(0, str(current) + str(num))\n\n\ndef click_clear():\n ent.delete(0, END)\n\n\ndef add():\n global ch\n ch = '+'\n clicked('+')\n\n\ndef subtract():\n global ch\n ch = '-'\n clicked('-')\n\n\ndef multiply():\n global ch\n ch = '*'\n clicked('*')\n\n\ndef divide():\n global ch\n ch = '/'\n clicked('/')\n\n\ndef equals():\n f_num, s_num = ent.get().split(ch)\n res = c.calculate(float(f_num), float(s_num), ch)\n ent.delete(0, END)\n ent.insert(0, res)\n\n\n<mask token>\n",
"step-2": "<mask token>\nroot.title('CALCULATOR')\n<mask token>\nent.grid(row=0, column=0, columnspan=3, padx=10, pady=10)\n<mask token>\n\n\ndef clicked(num):\n current = ent.get()\n ent.delete(0, END)\n ent.insert(0, str(current) + str(num))\n\n\ndef click_clear():\n ent.delete(0, END)\n\n\ndef add():\n global ch\n ch = '+'\n clicked('+')\n\n\ndef subtract():\n global ch\n ch = '-'\n clicked('-')\n\n\ndef multiply():\n global ch\n ch = '*'\n clicked('*')\n\n\ndef divide():\n global ch\n ch = '/'\n clicked('/')\n\n\ndef equals():\n f_num, s_num = ent.get().split(ch)\n res = c.calculate(float(f_num), float(s_num), ch)\n ent.delete(0, END)\n ent.insert(0, res)\n\n\n<mask token>\nbut7.grid(row=1, column=0)\nbut8.grid(row=1, column=1)\nbut9.grid(row=1, column=2)\nbut4.grid(row=2, column=0)\nbut5.grid(row=2, column=1)\nbut6.grid(row=2, column=2)\nbut1.grid(row=3, column=0)\nbut2.grid(row=3, column=1)\nbut3.grid(row=3, column=2)\nbut0.grid(row=4, column=0)\nbut_plus.grid(row=5, column=0)\nbut_sub.grid(row=6, column=0)\nbut_mul.grid(row=6, column=1)\nbut_div.grid(row=6, column=2)\nbut_eq.grid(row=4, column=1, columnspan=2)\nbut_clr.grid(row=5, column=1, columnspan=2)\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nroot.title('CALCULATOR')\nent = Entry(root, width=35)\nent.grid(row=0, column=0, columnspan=3, padx=10, pady=10)\nch = ''\nnum = ent.get()\n\n\ndef clicked(num):\n current = ent.get()\n ent.delete(0, END)\n ent.insert(0, str(current) + str(num))\n\n\ndef click_clear():\n ent.delete(0, END)\n\n\ndef add():\n global ch\n ch = '+'\n clicked('+')\n\n\ndef subtract():\n global ch\n ch = '-'\n clicked('-')\n\n\ndef multiply():\n global ch\n ch = '*'\n clicked('*')\n\n\ndef divide():\n global ch\n ch = '/'\n clicked('/')\n\n\ndef equals():\n f_num, s_num = ent.get().split(ch)\n res = c.calculate(float(f_num), float(s_num), ch)\n ent.delete(0, END)\n ent.insert(0, res)\n\n\nbut1 = Button(root, text='1', padx=40, pady=20, command=lambda : clicked(1))\nbut2 = Button(root, text='2', padx=40, pady=20, command=lambda : clicked(2))\nbut3 = Button(root, text='3', padx=40, pady=20, command=lambda : clicked(3))\nbut4 = Button(root, text='4', padx=40, pady=20, command=lambda : clicked(4))\nbut5 = Button(root, text='5', padx=40, pady=20, command=lambda : clicked(5))\nbut6 = Button(root, text='6', padx=40, pady=20, command=lambda : clicked(6))\nbut7 = Button(root, text='7', padx=40, pady=20, command=lambda : clicked(7))\nbut8 = Button(root, text='8', padx=40, pady=20, command=lambda : clicked(8))\nbut9 = Button(root, text='9', padx=40, pady=20, command=lambda : clicked(9))\nbut0 = Button(root, text='0', padx=40, pady=20, command=lambda : clicked(0))\nbut_plus = Button(root, text='+', padx=39, pady=20, command=add)\nbut_sub = Button(root, text='-', padx=40, pady=20, command=subtract)\nbut_mul = Button(root, text='*', padx=40, pady=20, command=multiply)\nbut_div = Button(root, text='/', padx=40, pady=20, command=divide)\nbut_eq = Button(root, text='=', padx=89, pady=20, command=equals)\nbut_clr = Button(root, text='C', padx=89, pady=20, command=click_clear)\nbut7.grid(row=1, column=0)\nbut8.grid(row=1, column=1)\nbut9.grid(row=1, column=2)\nbut4.grid(row=2, column=0)\nbut5.grid(row=2, column=1)\nbut6.grid(row=2, column=2)\nbut1.grid(row=3, column=0)\nbut2.grid(row=3, column=1)\nbut3.grid(row=3, column=2)\nbut0.grid(row=4, column=0)\nbut_plus.grid(row=5, column=0)\nbut_sub.grid(row=6, column=0)\nbut_mul.grid(row=6, column=1)\nbut_div.grid(row=6, column=2)\nbut_eq.grid(row=4, column=1, columnspan=2)\nbut_clr.grid(row=5, column=1, columnspan=2)\nroot.mainloop()\n",
"step-4": "from tkinter import *\nimport mathcalc as c\nroot = Tk()\nroot.title('CALCULATOR')\nent = Entry(root, width=35)\nent.grid(row=0, column=0, columnspan=3, padx=10, pady=10)\nch = ''\nnum = ent.get()\n\n\ndef clicked(num):\n current = ent.get()\n ent.delete(0, END)\n ent.insert(0, str(current) + str(num))\n\n\ndef click_clear():\n ent.delete(0, END)\n\n\ndef add():\n global ch\n ch = '+'\n clicked('+')\n\n\ndef subtract():\n global ch\n ch = '-'\n clicked('-')\n\n\ndef multiply():\n global ch\n ch = '*'\n clicked('*')\n\n\ndef divide():\n global ch\n ch = '/'\n clicked('/')\n\n\ndef equals():\n f_num, s_num = ent.get().split(ch)\n res = c.calculate(float(f_num), float(s_num), ch)\n ent.delete(0, END)\n ent.insert(0, res)\n\n\nbut1 = Button(root, text='1', padx=40, pady=20, command=lambda : clicked(1))\nbut2 = Button(root, text='2', padx=40, pady=20, command=lambda : clicked(2))\nbut3 = Button(root, text='3', padx=40, pady=20, command=lambda : clicked(3))\nbut4 = Button(root, text='4', padx=40, pady=20, command=lambda : clicked(4))\nbut5 = Button(root, text='5', padx=40, pady=20, command=lambda : clicked(5))\nbut6 = Button(root, text='6', padx=40, pady=20, command=lambda : clicked(6))\nbut7 = Button(root, text='7', padx=40, pady=20, command=lambda : clicked(7))\nbut8 = Button(root, text='8', padx=40, pady=20, command=lambda : clicked(8))\nbut9 = Button(root, text='9', padx=40, pady=20, command=lambda : clicked(9))\nbut0 = Button(root, text='0', padx=40, pady=20, command=lambda : clicked(0))\nbut_plus = Button(root, text='+', padx=39, pady=20, command=add)\nbut_sub = Button(root, text='-', padx=40, pady=20, command=subtract)\nbut_mul = Button(root, text='*', padx=40, pady=20, command=multiply)\nbut_div = Button(root, text='/', padx=40, pady=20, command=divide)\nbut_eq = Button(root, text='=', padx=89, pady=20, command=equals)\nbut_clr = Button(root, text='C', padx=89, pady=20, command=click_clear)\nbut7.grid(row=1, column=0)\nbut8.grid(row=1, column=1)\nbut9.grid(row=1, column=2)\nbut4.grid(row=2, column=0)\nbut5.grid(row=2, column=1)\nbut6.grid(row=2, column=2)\nbut1.grid(row=3, column=0)\nbut2.grid(row=3, column=1)\nbut3.grid(row=3, column=2)\nbut0.grid(row=4, column=0)\nbut_plus.grid(row=5, column=0)\nbut_sub.grid(row=6, column=0)\nbut_mul.grid(row=6, column=1)\nbut_div.grid(row=6, column=2)\nbut_eq.grid(row=4, column=1, columnspan=2)\nbut_clr.grid(row=5, column=1, columnspan=2)\nroot.mainloop()\n",
"step-5": "from tkinter import *\r\nimport mathcalc as c \r\nroot= Tk()\r\nroot.title(\"CALCULATOR\")\r\nent=Entry(root,width=35)\r\nent.grid(row=0,column=0,columnspan=3,padx=10,pady=10)\r\n#ent.grid(row=0,column=0)\r\nch=''\r\nnum=ent.get()\r\ndef clicked(num):\r\n\tcurrent=ent.get()\r\n\tent.delete(0,END)\r\n\tent.insert(0,str(current)+str(num))\r\ndef click_clear():\r\n\tent.delete(0,END)\r\n\r\ndef add():\r\n\tglobal ch\r\n\tch='+' \r\n\tclicked('+')\r\n\r\ndef subtract():\r\n\tglobal ch\r\n\tch='-' \r\n\tclicked('-')\r\n\r\ndef multiply():\r\n\tglobal ch\r\n\tch='*' \r\n\tclicked('*')\r\n\r\ndef divide():\r\n\tglobal ch\r\n\tch='/' \r\n\tclicked('/')\r\ndef equals():\r\n\tf_num,s_num=ent.get().split(ch)\r\n\tres=c.calculate(float(f_num),float(s_num),ch)\r\n\tent.delete(0,END)\r\n\tent.insert(0,res)\r\n\r\n#buttons\r\nbut1=Button(root,text=\"1\",padx=40,pady=20,command=lambda: clicked(1))\r\nbut2=Button(root,text=\"2\",padx=40,pady=20,command=lambda: clicked(2))\r\nbut3=Button(root,text=\"3\",padx=40,pady=20,command=lambda: clicked(3))\r\nbut4=Button(root,text=\"4\",padx=40,pady=20,command=lambda: clicked(4))\r\nbut5=Button(root,text=\"5\",padx=40,pady=20,command=lambda: clicked(5))\r\nbut6=Button(root,text=\"6\",padx=40,pady=20,command=lambda: clicked(6))\r\nbut7=Button(root,text=\"7\",padx=40,pady=20,command=lambda: clicked(7))\r\nbut8=Button(root,text=\"8\",padx=40,pady=20,command=lambda: clicked(8))\r\nbut9=Button(root,text=\"9\",padx=40,pady=20,command=lambda: clicked(9))\r\nbut0=Button(root,text=\"0\",padx=40,pady=20,command=lambda: clicked(0))\r\n\r\nbut_plus=Button(root,text=\"+\",padx=39,pady=20,command=add)\r\nbut_sub=Button(root,text=\"-\",padx=40,pady=20,command=subtract)\r\nbut_mul=Button(root,text=\"*\",padx=40,pady=20,command=multiply)\r\nbut_div=Button(root,text=\"/\",padx=40,pady=20,command=divide)\r\nbut_eq=Button(root,text=\"=\",padx=89,pady=20,command=equals)\r\nbut_clr=Button(root,text=\"C\",padx=89,pady=20,command=click_clear)\r\n#button place\r\nbut7.grid(row=1,column=0)\r\nbut8.grid(row=1,column=1)\r\nbut9.grid(row=1,column=2)\r\n\r\nbut4.grid(row=2,column=0)\r\nbut5.grid(row=2,column=1)\r\nbut6.grid(row=2,column=2)\r\n\r\nbut1.grid(row=3,column=0)\r\nbut2.grid(row=3,column=1)\r\nbut3.grid(row=3,column=2)\r\n\r\nbut0.grid(row=4,column=0)\r\nbut_plus.grid(row=5,column=0)\r\nbut_sub.grid(row=6,column=0)\r\nbut_mul.grid(row=6,column=1)\r\nbut_div.grid(row=6,column=2)\r\nbut_eq.grid(row=4,column=1,columnspan=2)\r\nbut_clr.grid(row=5,column=1,columnspan=2)\r\nroot.mainloop()\r\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
import datetime
import time
import boto3
from botocore.config import Config
# FinSpace class with Spark bindings
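# NOTE: the FinSpace base class is not defined in this file; it is assumed to be provided by a
# companion helper module that wraps the FinSpace boto3 client (self.client, ingest_from_s3, list_views, ...).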
class SparkFinSpace(FinSpace):
import pyspark
def __init__(
self,
spark: pyspark.sql.session.SparkSession = None,
config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),
dev_overrides: dict = None
):
FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
self.spark = spark # used on Spark cluster for reading views, creating changesets from DataFrames
def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
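        # Write the DataFrame as Parquet to the caller's FinSpace ingestion location in S3 and return that path.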
resp = self.client.get_user_ingestion_info()
upload_location = resp['ingestionPath']
# data_frame.write.option('header', 'true').csv(upload_location)
data_frame.write.parquet(upload_location)
return upload_location
def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):
print("Uploading data...")
upload_location = self.upload_dataframe(data_frame)
print("Data upload finished. Ingesting data...")
return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})
def read_view_as_spark(
self,
dataset_id: str,
view_id: str
):
# TODO: switch to DescribeMatz when available in HFS
views = self.list_views(dataset_id=dataset_id, max_results=50)
filtered = [v for v in views if v['id'] == view_id]
if len(filtered) == 0:
raise Exception('No such view found')
if len(filtered) > 1:
raise Exception('Internal Server error')
view = filtered[0]
# 0. Ensure view is ready to be read
if (view['status'] != 'SUCCESS'):
status = view['status']
print(f'view run status is not ready: {status}. Returning empty.')
return
glue_db_name = view['destinationTypeProperties']['databaseName']
glue_table_name = view['destinationTypeProperties']['tableName']
# Query Glue table directly with catalog function of spark
return self.spark.table(f"`{glue_db_name}`.`{glue_table_name}`")
def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):
from pyspark.sql.types import StructType
# for translation to FinSpace's schema
# 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'
DoubleType = "DOUBLE"
FloatType = "FLOAT"
DateType = "DATE"
StringType = "STRING"
IntegerType = "INTEGER"
LongType = "BIGINT"
BooleanType = "BOOLEAN"
TimestampType = "DATETIME"
hab_columns = []
items = [i for i in data_frame.schema]
switcher = {
"BinaryType" : StringType,
"BooleanType" : BooleanType,
"ByteType" : IntegerType,
"DateType" : DateType,
"DoubleType" : FloatType,
"IntegerType" : IntegerType,
"LongType" : IntegerType,
"NullType" : StringType,
"ShortType" : IntegerType,
"StringType" : StringType,
"TimestampType" : TimestampType,
}
for i in items:
# print( f"name: {i.name} type: {i.dataType}" )
habType = switcher.get( str(i.dataType), StringType)
hab_columns.append({
"dataType" : habType,
"name" : i.name,
"description" : ""
})
return( hab_columns )
|
normal
|
{
"blob_id": "4f4af4caf81397542e9cd94c50b54303e2f81881",
"index": 3926,
"step-1": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n <mask token>\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n\n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame\n ):\n from pyspark.sql.types import StructType\n DoubleType = 'DOUBLE'\n FloatType = 'FLOAT'\n DateType = 'DATE'\n StringType = 'STRING'\n IntegerType = 'INTEGER'\n LongType = 'BIGINT'\n BooleanType = 'BOOLEAN'\n TimestampType = 'DATETIME'\n hab_columns = []\n items = [i for i in data_frame.schema]\n switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,\n 'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':\n FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,\n 'NullType': StringType, 'ShortType': IntegerType, 'StringType':\n StringType, 'TimestampType': TimestampType}\n for i in items:\n habType = switcher.get(str(i.dataType), StringType)\n hab_columns.append({'dataType': habType, 'name': i.name,\n 'description': ''})\n return hab_columns\n",
"step-4": "import datetime\nimport time\nimport boto3\nfrom botocore.config import Config\n\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n\n def __init__(self, spark: pyspark.sql.session.SparkSession=None, config\n =Config(retries={'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict=None):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark\n\n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n data_frame.write.parquet(upload_location)\n return upload_location\n\n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame,\n dataset_id: str, change_type: str, wait_for_completion=True):\n print('Uploading data...')\n upload_location = self.upload_dataframe(data_frame)\n print('Data upload finished. Ingesting data...')\n return self.ingest_from_s3(upload_location, dataset_id, change_type,\n wait_for_completion, format_type='parquet', format_params={})\n\n def read_view_as_spark(self, dataset_id: str, view_id: str):\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n if view['status'] != 'SUCCESS':\n status = view['status']\n print(f'view run status is not ready: {status}. Returning empty.')\n return\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n return self.spark.table(f'`{glue_db_name}`.`{glue_table_name}`')\n\n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame\n ):\n from pyspark.sql.types import StructType\n DoubleType = 'DOUBLE'\n FloatType = 'FLOAT'\n DateType = 'DATE'\n StringType = 'STRING'\n IntegerType = 'INTEGER'\n LongType = 'BIGINT'\n BooleanType = 'BOOLEAN'\n TimestampType = 'DATETIME'\n hab_columns = []\n items = [i for i in data_frame.schema]\n switcher = {'BinaryType': StringType, 'BooleanType': BooleanType,\n 'ByteType': IntegerType, 'DateType': DateType, 'DoubleType':\n FloatType, 'IntegerType': IntegerType, 'LongType': IntegerType,\n 'NullType': StringType, 'ShortType': IntegerType, 'StringType':\n StringType, 'TimestampType': TimestampType}\n for i in items:\n habType = switcher.get(str(i.dataType), StringType)\n hab_columns.append({'dataType': habType, 'name': i.name,\n 'description': ''})\n return hab_columns\n",
"step-5": "import datetime\nimport time\nimport boto3\nfrom botocore.config import Config\n\n# FinSpace class with Spark bindings\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n def __init__(\n self, \n spark: pyspark.sql.session.SparkSession = None,\n config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict = None\n ):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark # used on Spark cluster for reading views, creating changesets from DataFrames\n \n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n# data_frame.write.option('header', 'true').csv(upload_location)\n data_frame.write.parquet(upload_location)\n return upload_location\n \n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):\n print(\"Uploading data...\")\n upload_location = self.upload_dataframe(data_frame)\n \n print(\"Data upload finished. Ingesting data...\")\n \n return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})\n \n def read_view_as_spark(\n self,\n dataset_id: str,\n view_id: str\n ):\n # TODO: switch to DescribeMatz when available in HFS\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n \n # 0. Ensure view is ready to be read\n if (view['status'] != 'SUCCESS'): \n status = view['status'] \n print(f'view run status is not ready: {status}. Returning empty.')\n return\n\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n \n # Query Glue table directly with catalog function of spark\n return self.spark.table(f\"`{glue_db_name}`.`{glue_table_name}`\")\n \n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):\n from pyspark.sql.types import StructType\n\n # for translation to FinSpace's schema\n # 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'\n DoubleType = \"DOUBLE\"\n FloatType = \"FLOAT\"\n DateType = \"DATE\"\n StringType = \"STRING\"\n IntegerType = \"INTEGER\"\n LongType = \"BIGINT\"\n BooleanType = \"BOOLEAN\"\n TimestampType = \"DATETIME\"\n \n hab_columns = []\n\n items = [i for i in data_frame.schema] \n\n switcher = {\n \"BinaryType\" : StringType,\n \"BooleanType\" : BooleanType,\n \"ByteType\" : IntegerType,\n \"DateType\" : DateType,\n \"DoubleType\" : FloatType,\n \"IntegerType\" : IntegerType,\n \"LongType\" : IntegerType,\n \"NullType\" : StringType,\n \"ShortType\" : IntegerType,\n \"StringType\" : StringType,\n \"TimestampType\" : TimestampType,\n }\n\n \n for i in items:\n# print( f\"name: {i.name} type: {i.dataType}\" )\n\n habType = switcher.get( str(i.dataType), StringType)\n\n hab_columns.append({\n \"dataType\" : habType, \n \"name\" : i.name,\n \"description\" : \"\"\n })\n\n return( hab_columns )\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from layers import TrueSkillFactorGraph
from math import e, sqrt
from numerics import atLeast, _Vector, _DiagonalMatrix, Matrix
from objects import SkillCalculator, SupportedOptions, argumentNotNone, \
getPartialPlayPercentage, sortByRank
class FactorGraphTrueSkillCalculator(SkillCalculator):
def __init__(self):
super(FactorGraphTrueSkillCalculator, self).__init__(SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE, atLeast(2), atLeast(1))
def calculateNewRatings(self, gameInfo, teams, teamRanks):
argumentNotNone(gameInfo, "gameInfo")
self._validateTeamCountAndPlayersCountPerTeam(teams)
teams, teamRanks = sortByRank(teams, teamRanks)
factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)
factorGraph.buildGraph()
factorGraph.runSchedule()
return factorGraph.getUpdatedRatings()
def calculateMatchQuality(self, gameInfo, teams):
skillsMatrix = self._getPlayerCovarianceMatrix(teams)
meanVector = self._getPlayerMeansVector(teams)
meanVectorTranspose = meanVector.transpose
playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(teams, meanVector.rows)
playerTeamAssignmentsMatrixTranspose = playerTeamAssignmentsMatrix.transpose
betaSquared = gameInfo.beta**2.0
start = meanVectorTranspose * playerTeamAssignmentsMatrix
aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose) * playerTeamAssignmentsMatrix
aTSA = playerTeamAssignmentsMatrixTranspose * skillsMatrix * playerTeamAssignmentsMatrix
middle = aTa + aTSA
middleInverse = middle.inverse
end = playerTeamAssignmentsMatrixTranspose * meanVector
expPartMatrix = (start * middleInverse * end) * -0.5
expPart = expPartMatrix.determinant
sqrtPartNumerator = aTa.determinant
sqrtPartDenominator = middle.determinant
sqrtPart = sqrtPartNumerator / sqrtPartDenominator
result = (e**expPart) * sqrt(sqrtPart)
return result
def _getPlayerMeansVector(self, teamAssignmentsList):
return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.mean))
def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
return _DiagonalMatrix(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.standardDeviation**2.0))
def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
playerRatingValues = list()
for currentTeam in teamAssigmentsList:
for currentRating in currentTeam.values:
playerRatingValues.append(playerRatingFunction(currentRating))
return playerRatingValues
def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList, totalPlayers):
playerAssignments = list()
totalPreviousPlayers = 0
for i in range(len(teamAssignmentsList)):
currentTeam = teamAssignmentsList[i]
currentRowValues = [0] * totalPreviousPlayers
playerAssignments.append(currentRowValues)
for currentRating in currentTeam:
currentRowValues.append(getPartialPlayPercentage(currentRating[0]))
totalPreviousPlayers += 1
nextTeam = teamAssignmentsList[i + 1]
for nextTeamPlayerPair in nextTeam:
currentRowValues.append(-1 * getPartialPlayPercentage(nextTeamPlayerPair[0]))
return Matrix(totalPlayers, len(teamAssignmentsList) - 1, playerAssignments)
|
normal
|
{
"blob_id": "009be282e45d191eb8f4d7d2986a2f182d64c1dd",
"index": 2935,
"step-1": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n <mask token>\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n <mask token>\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-3": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n\n def calculateNewRatings(self, gameInfo, teams, teamRanks):\n argumentNotNone(gameInfo, 'gameInfo')\n self._validateTeamCountAndPlayersCountPerTeam(teams)\n teams, teamRanks = sortByRank(teams, teamRanks)\n factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n factorGraph.buildGraph()\n factorGraph.runSchedule()\n return factorGraph.getUpdatedRatings()\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-4": "from layers import TrueSkillFactorGraph\nfrom math import e, sqrt\nfrom numerics import atLeast, _Vector, _DiagonalMatrix, Matrix\nfrom objects import SkillCalculator, SupportedOptions, argumentNotNone, getPartialPlayPercentage, sortByRank\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n\n def calculateNewRatings(self, gameInfo, teams, teamRanks):\n argumentNotNone(gameInfo, 'gameInfo')\n self._validateTeamCountAndPlayersCountPerTeam(teams)\n teams, teamRanks = sortByRank(teams, teamRanks)\n factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n factorGraph.buildGraph()\n factorGraph.runSchedule()\n return factorGraph.getUpdatedRatings()\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-5": "from layers import TrueSkillFactorGraph\nfrom math import e, sqrt\nfrom numerics import atLeast, _Vector, _DiagonalMatrix, Matrix\nfrom objects import SkillCalculator, SupportedOptions, argumentNotNone, \\\n\tgetPartialPlayPercentage, sortByRank\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\tdef __init__(self):\n\t\tsuper(FactorGraphTrueSkillCalculator, self).__init__(SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE, atLeast(2), atLeast(1))\n\t\n\tdef calculateNewRatings(self, gameInfo, teams, teamRanks):\n\t\targumentNotNone(gameInfo, \"gameInfo\")\n\t\tself._validateTeamCountAndPlayersCountPerTeam(teams)\n\t\tteams, teamRanks = sortByRank(teams, teamRanks)\n\t\t\n\t\tfactorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n\t\tfactorGraph.buildGraph()\n\t\tfactorGraph.runSchedule()\t\n\t\t\n\t\treturn factorGraph.getUpdatedRatings()\n\t\t\n\tdef calculateMatchQuality(self, gameInfo, teams):\n\t\tskillsMatrix = self._getPlayerCovarianceMatrix(teams)\n\t\tmeanVector = self._getPlayerMeansVector(teams)\n\t\tmeanVectorTranspose = meanVector.transpose\n\t\t\n\t\tplayerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(teams, meanVector.rows)\n\t\tplayerTeamAssignmentsMatrixTranspose = playerTeamAssignmentsMatrix.transpose\n\t\t\n\t\tbetaSquared = gameInfo.beta**2.0\n\t\t\n\t\tstart = meanVectorTranspose * playerTeamAssignmentsMatrix\n\t\taTa = (betaSquared * playerTeamAssignmentsMatrixTranspose) * playerTeamAssignmentsMatrix\n\t\taTSA = playerTeamAssignmentsMatrixTranspose * skillsMatrix * playerTeamAssignmentsMatrix\n\t\tmiddle = aTa + aTSA\n\t\t\n\t\tmiddleInverse = middle.inverse\n\t\t\n\t\tend = playerTeamAssignmentsMatrixTranspose * meanVector\n\t\t\n\t\texpPartMatrix = (start * middleInverse * end) * -0.5\n\t\texpPart = expPartMatrix.determinant\n\t\t\n\t\tsqrtPartNumerator = aTa.determinant\n\t\tsqrtPartDenominator = middle.determinant\n\t\tsqrtPart = sqrtPartNumerator / sqrtPartDenominator\n\t\t\n\t\tresult = (e**expPart) * sqrt(sqrtPart)\n\t\t\n\t\treturn result\n\t\t\n\tdef _getPlayerMeansVector(self, teamAssignmentsList):\n\t\treturn _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.mean))\n\t\t\n\tdef _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n\t\treturn _DiagonalMatrix(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.standardDeviation**2.0))\n\t\t\n\tdef _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n\t\tplayerRatingValues = list()\n\t\tfor currentTeam in teamAssigmentsList:\n\t\t\tfor currentRating in currentTeam.values:\n\t\t\t\tplayerRatingValues.append(playerRatingFunction(currentRating))\n\t\treturn playerRatingValues\n\t\n\tdef _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList, totalPlayers):\n\t\tplayerAssignments = list()\n\t\ttotalPreviousPlayers = 0\n\t\t\n\t\tfor i in range(len(teamAssignmentsList)):\n\t\t\tcurrentTeam = teamAssignmentsList[i]\n\t\t\tcurrentRowValues = [0] * totalPreviousPlayers\n\t\t\tplayerAssignments.append(currentRowValues)\n\t\t\t\n\t\t\tfor currentRating in currentTeam:\n\t\t\t\tcurrentRowValues.append(getPartialPlayPercentage(currentRating[0]))\n\t\t\t\ttotalPreviousPlayers += 1\n\t\t\t\t\n\t\t\tnextTeam = teamAssignmentsList[i + 1]\n\t\t\tfor nextTeamPlayerPair in nextTeam:\n\t\t\t\tcurrentRowValues.append(-1 * getPartialPlayPercentage(nextTeamPlayerPair[0]))\n\t\t\t\t\n\t\treturn Matrix(totalPlayers, len(teamAssignmentsList) - 1, playerAssignments)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# getting a sample of data to parse for the keys of the players
import requests
import xml.etree.ElementTree as ET
currentPlayerInfoUrl="http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16"
r=requests.get(currentPlayerInfoUrl)
if r.status_code == requests.codes.ok:
with open('currentPlayerDump.json','w') as f:
for line in r.text:
f.write(line)
|
normal
|
{
"blob_id": "68f8b301d86659f9d76de443b0afe93fd7f7e8c2",
"index": 6588,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif r.status_code == requests.codes.ok:\n with open('currentPlayerDump.json', 'w') as f:\n for line in r.text:\n f.write(line)\n",
"step-3": "<mask token>\ncurrentPlayerInfoUrl = (\n 'http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16'\n )\nr = requests.get(currentPlayerInfoUrl)\nif r.status_code == requests.codes.ok:\n with open('currentPlayerDump.json', 'w') as f:\n for line in r.text:\n f.write(line)\n",
"step-4": "import requests\nimport xml.etree.ElementTree as ET\ncurrentPlayerInfoUrl = (\n 'http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16'\n )\nr = requests.get(currentPlayerInfoUrl)\nif r.status_code == requests.codes.ok:\n with open('currentPlayerDump.json', 'w') as f:\n for line in r.text:\n f.write(line)\n",
"step-5": "# getting a sample of data to parse for the keys of the players\nimport requests\nimport xml.etree.ElementTree as ET\n\ncurrentPlayerInfoUrl=\"http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=1&LeagueID=00&Season=2015-16\"\n\nr=requests.get(currentPlayerInfoUrl)\nif r.status_code == requests.codes.ok:\n\twith open('currentPlayerDump.json','w') as f:\n\t\tfor line in r.text:\n\t\t\tf.write(line)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Authentication views.
login()
Flask view to log a user in.
"""
import functools
from typing import Any, Callable, cast, Dict
from flask import Blueprint, make_response, request, session
from werkzeug.security import check_password_hash as _check_password_hash
from .accesscontrol import PERMISSIONS
from .api import APIError, UserSchema
from .db.models import User
bp = Blueprint("auth", __name__, url_prefix="/api/v1/auth")
_CHECK_HASH_ANYWAY = "pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050" # pylint: disable=line-too-long
check_password_hash: Callable[[str, str], bool] = cast(
Callable[[str, str], bool], _check_password_hash
)
@bp.route("/login", methods=("POST",))
def login() -> Any:
"""Flask view for logging a user in."""
user_dict = UserSchema().load(
request.json, partial=("id", "qualifications") + PERMISSIONS
)
username = user_dict["username"]
password = user_dict["password"]
if is_password_correct(username, password):
user = fetch_user(username)
session["user_id"] = user["id"]
response = make_response(user)
response.set_cookie("is_authenticated", "1")
return response
raise APIError(reason="invalid_user_or_password", status_code=403)
@bp.route("/logout", methods=("POST",))
def logout() -> Any:
"""Flask view to log a user out."""
if "user_id" in session:
del session["user_id"]
response = make_response({"success": True})
response.set_cookie("is_authenticated", max_age=0, expires=0)
return response
def is_password_correct(username: str, password: str) -> bool:
"""Checks whether password is valid for user, tries to avoid timing attacks."""
user = User.query.filter_by(username=username).first()
if user is None:
# We need to prevent timing-based side-channel attacks
# that could be exploited for user enumeration
password_hash = _CHECK_HASH_ANYWAY
else:
password_hash = user.password
return check_password_hash(password_hash, password) and user is not None
def fetch_user(username: str) -> Dict[str, Any]:
"""Look up a user as a dictionary from the DB."""
user = User.query.filter_by(username=username).first()
return cast(Dict[str, Any], UserSchema().dump(user))
def authentication_required(to_be_wrapped: Callable[..., Any]) -> Callable[..., Any]:
"""Wraps a view with a check for whether the user is authenticated."""
@functools.wraps(to_be_wrapped)
def wrapper(*args: Any, **kwargs: Any) -> Any:
user_id = session.get("user_id")
if user_id is None or User.query.get(user_id) is None:
if "user_id" in session:
del session["user_id"]
response = make_response({"reason": "authentication_required"}, 403)
response.set_cookie("is_authenticated", max_age=0, expires=0)
return response
return to_be_wrapped(*args, **kwargs)
return wrapper
|
normal
|
{
"blob_id": "2d36ae916ad257615016ed6c0bc67e506ee313c9",
"index": 1528,
"step-1": "<mask token>\n\n\[email protected]('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\[email protected]('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\n<mask token>\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-2": "<mask token>\ncheck_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],\n bool], _check_password_hash)\n\n\[email protected]('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\[email protected]('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) ->Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-3": "<mask token>\nbp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')\n_CHECK_HASH_ANYWAY = (\n 'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'\n )\ncheck_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],\n bool], _check_password_hash)\n\n\[email protected]('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\[email protected]('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) ->Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-4": "<mask token>\nimport functools\nfrom typing import Any, Callable, cast, Dict\nfrom flask import Blueprint, make_response, request, session\nfrom werkzeug.security import check_password_hash as _check_password_hash\nfrom .accesscontrol import PERMISSIONS\nfrom .api import APIError, UserSchema\nfrom .db.models import User\nbp = Blueprint('auth', __name__, url_prefix='/api/v1/auth')\n_CHECK_HASH_ANYWAY = (\n 'pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050'\n )\ncheck_password_hash: Callable[[str, str], bool] = cast(Callable[[str, str],\n bool], _check_password_hash)\n\n\[email protected]('/login', methods=('POST',))\ndef login() ->Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(request.json, partial=('id',\n 'qualifications') + PERMISSIONS)\n username = user_dict['username']\n password = user_dict['password']\n if is_password_correct(username, password):\n user = fetch_user(username)\n session['user_id'] = user['id']\n response = make_response(user)\n response.set_cookie('is_authenticated', '1')\n return response\n raise APIError(reason='invalid_user_or_password', status_code=403)\n\n\[email protected]('/logout', methods=('POST',))\ndef logout() ->Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'success': True})\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) ->bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) ->Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) ->Callable[\n ..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) ->Any:\n user_id = session.get('user_id')\n if user_id is None or User.query.get(user_id) is None:\n if 'user_id' in session:\n del session['user_id']\n response = make_response({'reason': 'authentication_required'}, 403\n )\n response.set_cookie('is_authenticated', max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n return wrapper\n",
"step-5": "\"\"\"\nAuthentication views.\n\nlogin()\n Flask view to log a user in.\n\"\"\"\n\nimport functools\nfrom typing import Any, Callable, cast, Dict\n\nfrom flask import Blueprint, make_response, request, session\nfrom werkzeug.security import check_password_hash as _check_password_hash\n\nfrom .accesscontrol import PERMISSIONS\nfrom .api import APIError, UserSchema\nfrom .db.models import User\n\n\nbp = Blueprint(\"auth\", __name__, url_prefix=\"/api/v1/auth\")\n\n_CHECK_HASH_ANYWAY = \"pbkdf2:sha256:150000$tRQtwnYW$80442246fe5dbd649c8a90cd0209f7a3751e8a0ec1327f88f6b331f929642050\" # pylint: disable=line-too-long\n\ncheck_password_hash: Callable[[str, str], bool] = cast(\n Callable[[str, str], bool], _check_password_hash\n)\n\n\[email protected](\"/login\", methods=(\"POST\",))\ndef login() -> Any:\n \"\"\"Flask view for logging a user in.\"\"\"\n user_dict = UserSchema().load(\n request.json, partial=(\"id\", \"qualifications\") + PERMISSIONS\n )\n username = user_dict[\"username\"]\n password = user_dict[\"password\"]\n\n if is_password_correct(username, password):\n user = fetch_user(username)\n session[\"user_id\"] = user[\"id\"]\n response = make_response(user)\n response.set_cookie(\"is_authenticated\", \"1\")\n return response\n\n raise APIError(reason=\"invalid_user_or_password\", status_code=403)\n\n\[email protected](\"/logout\", methods=(\"POST\",))\ndef logout() -> Any:\n \"\"\"Flask view to log a user out.\"\"\"\n if \"user_id\" in session:\n del session[\"user_id\"]\n response = make_response({\"success\": True})\n response.set_cookie(\"is_authenticated\", max_age=0, expires=0)\n return response\n\n\ndef is_password_correct(username: str, password: str) -> bool:\n \"\"\"Checks whether password is valid for user, tries to avoid timing attacks.\"\"\"\n user = User.query.filter_by(username=username).first()\n if user is None:\n # We need to prevent timing-based side-channel attacks\n # that could be exploited for user enumeration\n password_hash = _CHECK_HASH_ANYWAY\n else:\n password_hash = user.password\n\n return check_password_hash(password_hash, password) and user is not None\n\n\ndef fetch_user(username: str) -> Dict[str, Any]:\n \"\"\"Look up a user as a dictionary from the DB.\"\"\"\n user = User.query.filter_by(username=username).first()\n return cast(Dict[str, Any], UserSchema().dump(user))\n\n\ndef authentication_required(to_be_wrapped: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"Wraps a view with a check for whether the user is authenticated.\"\"\"\n\n @functools.wraps(to_be_wrapped)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n user_id = session.get(\"user_id\")\n if user_id is None or User.query.get(user_id) is None:\n if \"user_id\" in session:\n del session[\"user_id\"]\n response = make_response({\"reason\": \"authentication_required\"}, 403)\n response.set_cookie(\"is_authenticated\", max_age=0, expires=0)\n return response\n return to_be_wrapped(*args, **kwargs)\n\n return wrapper\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import numpy as np
import cv2
def optical_flow_from_video():
cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi")
    # Parameters for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow
    # maxLevel is the number of image-pyramid levels to use
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Generate random colors for drawing the tracks
color = np.random.randint(0, 255, (100, 3))
    # Read the first frame and detect corners in it
_, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image used later to draw the optical-flow tracks
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute the new positions of the tracked corners
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st == 1]
good_old = p0[st == 1]
            # Draw the corner tracks
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 0xff == ord("q"):
break
            # Update the previous frame and corner positions
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera():
cap = cv2.VideoCapture(0)
    # Parameters for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100, qualityLevel=0.3,
minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow
    # maxLevel is the number of image-pyramid levels to use
lk_params = dict(winSize=(15, 15), maxLevel=2,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Generate random colors for drawing the tracks
color = np.random.randint(0, 255, (100, 3))
    # Read the first frame and detect corners in it
_, old_frame = cap.read()
old_frame = cv2.flip(old_frame, 1)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image used later to draw the optical-flow tracks
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute the new positions of the tracked corners
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st == 1]
good_old = p0[st == 1]
            # Draw the corner tracks
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 0xff == ord("q"):
break
            # Update the previous frame and corner positions
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera_farneback2():
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
ret, frame1 = cap.read()
frame1 = cv2.flip(frame1, 1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.flip(frame2, 1)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
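        # Farneback computes dense optical flow: a per-pixel (dx, dy) displacement between prvs and next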
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
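        # Visualize the flow field: direction -> hue, magnitude -> brightness, then convert HSV to BGR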
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 0xff == ord("q"):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback(flip=True, resize=True):
# cap = cv2.VideoCapture('test.mp4')
# cap = cv2.VideoCapture('test2.ts')
cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi")
# cap = cv2.VideoCapture(0)
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 0xff == ord("q"):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_and_write_video():
# cap = cv2.VideoCapture('eccv.avi')
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
i += 1
cv2.imwrite("{}/{}.jpg".format("test2", str(i)), result)
            if cv2.waitKey(1) & 0xff == ord("q"):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_farneback_and_write_video():
def crop(frame):
# start_x = 1400
# end_x = start_x + 600
# start_y = 100
# end_y = start_y + 700
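        # Crop a fixed 500x500 region of interest (rows 800:1300, columns 1500:2000)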
start_x = 800
end_x = start_x + 500
start_y = 1500
end_y = start_y + 500
return frame[start_x:end_x, start_y: end_y]
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
ret, frame1 = cap.read()
frame1 = crop(frame1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
i += 1
if i % 2 != 0:
continue
frame2 = crop(frame2)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3,
winsize=7, iterations=3, poly_n=5, poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
cv2.imwrite("{}/{}.jpg".format("test2", str(i // 3)), result)
            if cv2.waitKey(1) & 0xff == ord("q"):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_2(flip=False, resize=True):
# cap = cv2.VideoCapture('test.mp4')
# cap = cv2.VideoCapture('test2.ts')
cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi")
# cap = cv2.VideoCapture(0)
width = 800
height = 500
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3, winsize=8,
iterations=5, poly_n=5, poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(100) & 0xff == ord("q"):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
if __name__ == '__main__':
optical_flow_farneback_and_write_video()
pass
|
normal
|
{
"blob_id": "ae0547aa1af2d4dd73bb60154574e64e74107a58",
"index": 4062,
"step-1": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\n<mask token>\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 
0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\n<mask token>\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 
0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = 
cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n cap.set(3, 640)\n cap.set(4, 480)\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef 
optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 
'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n cap.set(3, 640)\n cap.set(4, 480)\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef 
optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 
'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\nif __name__ == '__main__':\n optical_flow_farneback_and_write_video()\n pass\n",
"step-5": "import numpy as np\nimport cv2\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi\")\n\n # 设置 ShiTomasi 角点检测的参数\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\n # 设置 lucas kanade 光流场的参数\n # maxLevel 为使用图像金字塔的层数\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n # 产生随机的颜色值\n color = np.random.randint(0, 255, (100, 3))\n\n # 获取第一帧,并寻找其中的角点\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n # 创建一个掩膜为了后面绘制角点的光流轨迹\n mask = np.zeros_like(old_frame)\n\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 计算能够获取的角点的新位置\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n # 绘制角点的轨迹\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xff == ord(\"q\"):\n break\n\n # 更新当前帧和当前角点的位置\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n\n pass\n\n cv2.destroyAllWindows()\n cap.release()\n\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n\n # 设置 ShiTomasi 角点检测的参数\n feature_params = dict(maxCorners=100, qualityLevel=0.3,\n minDistance=7, blockSize=7)\n # 设置 lucas kanade 光流场的参数\n # maxLevel 为使用图像金字塔的层数\n lk_params = dict(winSize=(15, 15), maxLevel=2,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n # 产生随机的颜色值\n color = np.random.randint(0, 255, (100, 3))\n\n # 获取第一帧,并寻找其中的角点\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n # 创建一个掩膜为了后面绘制角点的光流轨迹\n mask = np.zeros_like(old_frame)\n\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 计算能够获取的角点的新位置\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n # 绘制角点的轨迹\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xff == ord(\"q\"):\n break\n\n # 更新当前帧和当前角点的位置\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n\n pass\n\n cv2.destroyAllWindows()\n cap.release()\n\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n\n cap.set(3, 640)\n cap.set(4, 480)\n\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, 
None, 0.5, 3, 15, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n # cap = cv2.VideoCapture('test.mp4')\n # cap = cv2.VideoCapture('test2.ts')\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi\")\n # cap = cv2.VideoCapture(0)\n\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n cv2.imshow('frame2', rgb)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n # cap = cv2.VideoCapture('eccv.avi')\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n i = 0\n\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n i += 1\n cv2.imwrite(\"{}/{}.jpg\".format(\"test2\", str(i)), result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n except Exception:\n break\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n # start_x = 1400\n # end_x = start_x + 600\n # start_y = 100\n # end_y = start_y + 700\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y: end_y]\n\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = 
np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n i = 0\n\n while True:\n try:\n ret, frame2 = cap.read()\n\n i += 1\n if i % 2 != 0:\n continue\n\n frame2 = crop(frame2)\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3,\n winsize=7, iterations=3, poly_n=5, poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n cv2.imwrite(\"{}/{}.jpg\".format(\"test2\", str(i // 3)), result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n except Exception:\n break\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n # cap = cv2.VideoCapture('test.mp4')\n # cap = cv2.VideoCapture('test2.ts')\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi\")\n # cap = cv2.VideoCapture(0)\n\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3, winsize=8,\n iterations=5, poly_n=5, poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n cv2.imshow('frame2', rgb)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(100) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\nif __name__ == '__main__':\n optical_flow_farneback_and_write_video()\n pass\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
import media
import fresh_tomatoes
toy_story = media.Movie("Toy Story",
"A story of a boy and his toys that come to life",
'<p><a href="https://en.wikipedia.org/wiki/File:Toy_Story.jpg#/media/File:Toy_Story.jpg"><img src="https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg" alt="The poster features Woody anxiously holding onto Buzz Lightyear as he flies in Andy\'s room. Below them sitting on the bed are Bo Peep, Mr. Potato Head, Troll, Hamm, Slinky, Sarge and Rex. In the lower right center of the image is the film\'s title. The background shows the cloud wallpaper featured in the bedroom."></a><br>By From <a rel="nofollow" class="external text" href="http://www.impawards.com/1995/toy_story_ver1.html">impawards</a>., <a href="https://en.wikipedia.org/w/index.php?curid=26009601">Link</a></p>',
"https://youtu.be/KYz2wyBy3kc")
avatar = media.Movie("Avatar",
"A marine on an alien planet",
'<p><a href="https://en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg#/media/File:Avatar-Teaser-Poster.jpg"><img src="https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg" alt="Avatar-Teaser-Poster.jpg"></a><br>By Source, <a href="//en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg" title="Fair use of copyrighted material in the context of Avatar (2009 film)">Fair use</a>, <a href="https://en.wikipedia.org/w/index.php?curid=23732044">Link</a></p>',
"https://youtu.be/5PSNL1qE6VY")
# print(avatar.storyline)
# avatar.show_trailer()
movies = [toy_story, avatar]
fresh_tomatoes.open_movies_page(movies)
# print(media.Movie.__doc__)
# print(media.Movie.__name__)
# print(media.Movie.__module__)
|
normal
|
{
"blob_id": "e2f6e6e872f95471ebbc8b25bde08247fe8f7e61",
"index": 8829,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfresh_tomatoes.open_movies_page(movies)\n",
"step-3": "<mask token>\ntoy_story = media.Movie('Toy Story',\n 'A story of a boy and his toys that come to life',\n '<p><a href=\"https://en.wikipedia.org/wiki/File:Toy_Story.jpg#/media/File:Toy_Story.jpg\"><img src=\"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\" alt=\"The poster features Woody anxiously holding onto Buzz Lightyear as he flies in Andy\\'s room. Below them sitting on the bed are Bo Peep, Mr. Potato Head, Troll, Hamm, Slinky, Sarge and Rex. In the lower right center of the image is the film\\'s title. The background shows the cloud wallpaper featured in the bedroom.\"></a><br>By From <a rel=\"nofollow\" class=\"external text\" href=\"http://www.impawards.com/1995/toy_story_ver1.html\">impawards</a>., <a href=\"https://en.wikipedia.org/w/index.php?curid=26009601\">Link</a></p>'\n , 'https://youtu.be/KYz2wyBy3kc')\navatar = media.Movie('Avatar', 'A marine on an alien planet',\n '<p><a href=\"https://en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg#/media/File:Avatar-Teaser-Poster.jpg\"><img src=\"https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg\" alt=\"Avatar-Teaser-Poster.jpg\"></a><br>By Source, <a href=\"//en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg\" title=\"Fair use of copyrighted material in the context of Avatar (2009 film)\">Fair use</a>, <a href=\"https://en.wikipedia.org/w/index.php?curid=23732044\">Link</a></p>'\n , 'https://youtu.be/5PSNL1qE6VY')\nmovies = [toy_story, avatar]\nfresh_tomatoes.open_movies_page(movies)\n",
"step-4": "import media\nimport fresh_tomatoes\ntoy_story = media.Movie('Toy Story',\n 'A story of a boy and his toys that come to life',\n '<p><a href=\"https://en.wikipedia.org/wiki/File:Toy_Story.jpg#/media/File:Toy_Story.jpg\"><img src=\"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\" alt=\"The poster features Woody anxiously holding onto Buzz Lightyear as he flies in Andy\\'s room. Below them sitting on the bed are Bo Peep, Mr. Potato Head, Troll, Hamm, Slinky, Sarge and Rex. In the lower right center of the image is the film\\'s title. The background shows the cloud wallpaper featured in the bedroom.\"></a><br>By From <a rel=\"nofollow\" class=\"external text\" href=\"http://www.impawards.com/1995/toy_story_ver1.html\">impawards</a>., <a href=\"https://en.wikipedia.org/w/index.php?curid=26009601\">Link</a></p>'\n , 'https://youtu.be/KYz2wyBy3kc')\navatar = media.Movie('Avatar', 'A marine on an alien planet',\n '<p><a href=\"https://en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg#/media/File:Avatar-Teaser-Poster.jpg\"><img src=\"https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg\" alt=\"Avatar-Teaser-Poster.jpg\"></a><br>By Source, <a href=\"//en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg\" title=\"Fair use of copyrighted material in the context of Avatar (2009 film)\">Fair use</a>, <a href=\"https://en.wikipedia.org/w/index.php?curid=23732044\">Link</a></p>'\n , 'https://youtu.be/5PSNL1qE6VY')\nmovies = [toy_story, avatar]\nfresh_tomatoes.open_movies_page(movies)\n",
"step-5": "import media\nimport fresh_tomatoes\n\ntoy_story = media.Movie(\"Toy Story\",\n \"A story of a boy and his toys that come to life\",\n '<p><a href=\"https://en.wikipedia.org/wiki/File:Toy_Story.jpg#/media/File:Toy_Story.jpg\"><img src=\"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\" alt=\"The poster features Woody anxiously holding onto Buzz Lightyear as he flies in Andy\\'s room. Below them sitting on the bed are Bo Peep, Mr. Potato Head, Troll, Hamm, Slinky, Sarge and Rex. In the lower right center of the image is the film\\'s title. The background shows the cloud wallpaper featured in the bedroom.\"></a><br>By From <a rel=\"nofollow\" class=\"external text\" href=\"http://www.impawards.com/1995/toy_story_ver1.html\">impawards</a>., <a href=\"https://en.wikipedia.org/w/index.php?curid=26009601\">Link</a></p>',\n \"https://youtu.be/KYz2wyBy3kc\")\n\navatar = media.Movie(\"Avatar\",\n \"A marine on an alien planet\",\n '<p><a href=\"https://en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg#/media/File:Avatar-Teaser-Poster.jpg\"><img src=\"https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg\" alt=\"Avatar-Teaser-Poster.jpg\"></a><br>By Source, <a href=\"//en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg\" title=\"Fair use of copyrighted material in the context of Avatar (2009 film)\">Fair use</a>, <a href=\"https://en.wikipedia.org/w/index.php?curid=23732044\">Link</a></p>',\n \"https://youtu.be/5PSNL1qE6VY\")\n\n# print(avatar.storyline)\n# avatar.show_trailer()\nmovies = [toy_story, avatar]\nfresh_tomatoes.open_movies_page(movies)\n# print(media.Movie.__doc__)\n# print(media.Movie.__name__)\n# print(media.Movie.__module__)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
school = "Old boy"
def chang_name(name):
    global school # declare the global variable
school = "Mage Linux"
print("Before change:", name, school)
name = 'Stack Cong'
age = 33
print("After change:", name)
print("School:", school)
name = "Stack"
chang_name(name)
print(name)
|
normal
|
{
"blob_id": "a9531fb020428e573d189c377652692e301ea4d3",
"index": 3026,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef chang_name(name):\n global school\n school = 'Mage Linux'\n print('Before change:', name, school)\n name = 'Stack Cong'\n age = 33\n print('After change:', name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef chang_name(name):\n global school\n school = 'Mage Linux'\n print('Before change:', name, school)\n name = 'Stack Cong'\n age = 33\n print('After change:', name)\n\n\nprint('School:', school)\n<mask token>\nchang_name(name)\nprint(name)\n",
"step-4": "school = 'Old boy'\n\n\ndef chang_name(name):\n global school\n school = 'Mage Linux'\n print('Before change:', name, school)\n name = 'Stack Cong'\n age = 33\n print('After change:', name)\n\n\nprint('School:', school)\nname = 'Stack'\nchang_name(name)\nprint(name)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nschool = \"Old boy\"\n\ndef chang_name(name):\n global school #声明全局变量\n school = \"Mage Linux\"\n print(\"Before change:\", name, school)\n name = 'Stack Cong'\n age = 33\n print(\"After change:\", name)\n\nprint(\"School:\", school)\nname = \"Stack\"\nchang_name(name)\nprint(name)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
import sys
import random
def has_duplicates(list) :
"""Returns True if there are duplicate in list, false otherwise"""
copy = list[:]
copy.sort()
for item in range(len(list)-1):
if copy[item] == copy[item + 1]:
return True;
return False;
def gen_birthdays(n):
"""returns a list of random bdays of length n"""
list = []
for date in range(n):
list.append(random.randint(1, 365))
return list
def num_matches(students, samples):
"""generates sample bdays for number of students and returns count of how many
had matches"""
count = 0
for i in range(samples):
bday_list = gen_birthdays(students)
if has_duplicates(bday_list):
count += 1
return count
num_students = 23;
num_simulations = 10000
count = num_matches(num_students, num_simulations)
print 'Students: %d' % num_students
print 'Simulations: %d' % num_simulations
print 'Matches: %d' % count
|
normal
|
{
"blob_id": "e34e1e220c6d0fe2dc3d42caaefb04b178cdd120",
"index": 3768,
"step-1": "#!/usr/bin/python\nimport sys\nimport random\n\ndef has_duplicates(list) :\n \"\"\"Returns True if there are duplicate in list, false otherwise\"\"\"\n copy = list[:]\n copy.sort()\n for item in range(len(list)-1):\n if copy[item] == copy[item + 1]:\n return True;\n return False;\n\ndef gen_birthdays(n):\n \"\"\"returns a list of random bdays of length n\"\"\"\n list = []\n for date in range(n):\n list.append(random.randint(1, 365))\n return list\n\ndef num_matches(students, samples):\n \"\"\"generates sample bdays for number of students and returns count of how many\n had matches\"\"\"\n count = 0\n for i in range(samples):\n bday_list = gen_birthdays(students)\n if has_duplicates(bday_list):\n count += 1\n return count\n\nnum_students = 23;\nnum_simulations = 10000\ncount = num_matches(num_students, num_simulations)\n\nprint 'Students: %d' % num_students\nprint 'Simulations: %d' % num_simulations\nprint 'Matches: %d' % count\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Head start.
# ask me for this solution: 6cb9ce6024b5fd41aebb86ccd40d8080
# this line is not needed, just for better output:
from pprint import pprint
# just remove the top line
def count_or_add_trigrams(trigram, trigrams_so_far):
'''
Takes a trigram, and a list of previously seen trigrams
and yields the same list with all discovered and counted
trigrams.
Adds given trigram if not found,
increments the trigram counter if found.
'''
for entry in trigrams_so_far:
test_trigram = entry[0]
if test_trigram == trigram:
entry[1] += 1
break
else:
trigrams_so_far.append([trigram, 1])
return trigrams_so_far
test_trigrams = [
['a', 'b', 'c'],
['d', 'e', 'f'],
['b', 'd', 'e'],
['d', 'e', 'f'],
['a', 'a', 'a'],
['d', 'e', 'f']
]
trigram_count = []
for trigram in test_trigrams:
print('I have been given this trigram:', end=' ')
pprint(trigram)
trigram_count = count_or_add_trigrams(trigram, trigram_count)
print('After finishing this operation, my data looks like:')
pprint(trigram_count)
print('-------------------------------------------------------------')
print('After doing all test trigrams, this is what I have:')
pprint(trigram_count)
|
normal
|
{
"blob_id": "753cc532e4d049bacff33c97de4d80bb9ab8ece8",
"index": 2655,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef count_or_add_trigrams(trigram, trigrams_so_far):\n \"\"\"\n Takes a trigram, and a list of previously seen trigrams\n and yields the same list with all discovered and counted\n trigrams.\n Adds given trigram if not found,\n increments the trigram counter if found.\n \"\"\"\n for entry in trigrams_so_far:\n test_trigram = entry[0]\n if test_trigram == trigram:\n entry[1] += 1\n break\n else:\n trigrams_so_far.append([trigram, 1])\n return trigrams_so_far\n\n\n<mask token>\nfor trigram in test_trigrams:\n print('I have been given this trigram:', end=' ')\n pprint(trigram)\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\n print('After finishing this operation, my data looks like:')\n pprint(trigram_count)\n print('-------------------------------------------------------------')\nprint('After doing all test trigrams, this is what I have:')\npprint(trigram_count)\n",
"step-3": "<mask token>\n\n\ndef count_or_add_trigrams(trigram, trigrams_so_far):\n \"\"\"\n Takes a trigram, and a list of previously seen trigrams\n and yields the same list with all discovered and counted\n trigrams.\n Adds given trigram if not found,\n increments the trigram counter if found.\n \"\"\"\n for entry in trigrams_so_far:\n test_trigram = entry[0]\n if test_trigram == trigram:\n entry[1] += 1\n break\n else:\n trigrams_so_far.append([trigram, 1])\n return trigrams_so_far\n\n\ntest_trigrams = [['a', 'b', 'c'], ['d', 'e', 'f'], ['b', 'd', 'e'], ['d',\n 'e', 'f'], ['a', 'a', 'a'], ['d', 'e', 'f']]\ntrigram_count = []\nfor trigram in test_trigrams:\n print('I have been given this trigram:', end=' ')\n pprint(trigram)\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\n print('After finishing this operation, my data looks like:')\n pprint(trigram_count)\n print('-------------------------------------------------------------')\nprint('After doing all test trigrams, this is what I have:')\npprint(trigram_count)\n",
"step-4": "from pprint import pprint\n\n\ndef count_or_add_trigrams(trigram, trigrams_so_far):\n \"\"\"\n Takes a trigram, and a list of previously seen trigrams\n and yields the same list with all discovered and counted\n trigrams.\n Adds given trigram if not found,\n increments the trigram counter if found.\n \"\"\"\n for entry in trigrams_so_far:\n test_trigram = entry[0]\n if test_trigram == trigram:\n entry[1] += 1\n break\n else:\n trigrams_so_far.append([trigram, 1])\n return trigrams_so_far\n\n\ntest_trigrams = [['a', 'b', 'c'], ['d', 'e', 'f'], ['b', 'd', 'e'], ['d',\n 'e', 'f'], ['a', 'a', 'a'], ['d', 'e', 'f']]\ntrigram_count = []\nfor trigram in test_trigrams:\n print('I have been given this trigram:', end=' ')\n pprint(trigram)\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\n print('After finishing this operation, my data looks like:')\n pprint(trigram_count)\n print('-------------------------------------------------------------')\nprint('After doing all test trigrams, this is what I have:')\npprint(trigram_count)\n",
"step-5": "# Head start.\r\n# ask me for this solution: 6cb9ce6024b5fd41aebb86ccd40d8080\r\n\r\n# this line is not needed, just for better output:\r\nfrom pprint import pprint\r\n# just remove the top line\r\n\r\ndef count_or_add_trigrams(trigram, trigrams_so_far):\r\n '''\r\n Takes a trigram, and a list of previously seen trigrams\r\n and yields the same list with all discovered and counted\r\n trigrams.\r\n Adds given trigram if not found,\r\n increments the trigram counter if found.\r\n '''\r\n\r\n for entry in trigrams_so_far:\r\n test_trigram = entry[0]\r\n if test_trigram == trigram:\r\n entry[1] += 1\r\n\r\n break\r\n\r\n else:\r\n trigrams_so_far.append([trigram, 1])\r\n return trigrams_so_far\r\n\r\n\r\ntest_trigrams = [\r\n ['a', 'b', 'c'],\r\n ['d', 'e', 'f'],\r\n ['b', 'd', 'e'],\r\n ['d', 'e', 'f'],\r\n ['a', 'a', 'a'],\r\n ['d', 'e', 'f']\r\n]\r\n\r\ntrigram_count = []\r\nfor trigram in test_trigrams:\r\n print('I have been given this trigram:', end=' ')\r\n pprint(trigram)\r\n trigram_count = count_or_add_trigrams(trigram, trigram_count)\r\n print('After finishing this operation, my data looks like:')\r\n pprint(trigram_count)\r\n print('-------------------------------------------------------------')\r\n\r\nprint('After doing all test trigrams, this is what I have:')\r\npprint(trigram_count)\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# coding: utf-8
# # Configuration
# In[1]:
CONNECTION_STRING = "mongodb://localhost:27017"
DATABASE_NAME = "off"
COLLECTION_NAME = "products"
# # MongoDB connection
# In[2]:
from pymongo import MongoClient
from bson.code import Code
import plotly, pymongo
plotly.offline.init_notebook_mode()
from plotly.graph_objs import Bar
client = MongoClient(CONNECTION_STRING)
db = client[DATABASE_NAME]
openfood = db[COLLECTION_NAME]
# # Nutrition grade
# In[6]:
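# Map-reduce: the mapper emits (nutrition_grade, 1) for every product with a non-empty grade; the reducer sums the counts per grade.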
mapper = Code("""
function () {
if (typeof this.nutrition_grades !== 'undefined' && this.nutrition_grades !== ""){
emit(this.nutrition_grades, 1);
}
}""")
reducer = Code("""
function (key, values) {
var total = 0;
for (var i = 0; i < values.length; i++) {
total += values[i];
}
return total;
}""")
grades = openfood.inline_map_reduce(mapper, reducer)
print grades
# In[14]:
import numpy as np
import matplotlib.pyplot as plt
objects = [item['_id'] for item in grades] # [a,b,c,d,e]
y_pos = np.arange(len(objects))
count = [item['value'] for item in grades]
plt.bar(y_pos, count, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Count')
plt.title('Nutrition Grades')
plt.show()
# Each food entry states the countries in which the food is sold. Below, we try to find out the list of countries in which the food is sold.
# # Nutrients (100g)
# In[16]:
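# Map-reduce that collects the distinct per-100g nutrient field names (keys containing "100g") present in the collection.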
mapper = Code("""
function () {
if (typeof this.nutriments !== 'undefined' && this.nutriments !== "") {
for (var key in this.nutriments) {
if (key.match(/.*100g/))
emit(key, null);
}
}
}""")
reducer = Code("""
function (key, values) {
return key
}""")
nutriments_100g_fields = openfood.inline_map_reduce(mapper, reducer)
for n in nutriments_100g_fields:
print n
# In[17]:
for n in nutriments_100g_fields:
print n['_id']
# # Additives
# In[24]:
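# Map-reduce over the raw `additives` string: strip the outer brackets, split the entries, extract each additive code and count its occurrences.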
mapper = Code("""
function () {
if (typeof this.additives !== "undefined" && this.additives_n >= 0){
var add = this.additives.substring(3, this.additives.length-3); // remove "^ [ " and " ] $"
var add_str = add.split(" ] [ ");
for (var i = 0; i < add_str.length; i++){
var additive_parts = add_str[i].split(" -> exists -- ");
if (additive_parts.length == 2){
var add_code = additive_parts[0].split(" -> ")[1];
emit(add_code, 1);
}
}
}
}""")
reducer = Code("""
function (key, values) {
var total = 0;
for (var i = 0; i < values.length; i++) {
total += values[i];
}
return total;
}""")
additives_stats = openfood.inline_map_reduce(mapper, reducer)
print additives_stats
# In[29]:
add_clean = [(x['value'], x['_id']) for x in additives_stats]
add_clean.sort()
print len(add_clean)
for add in add_clean:
print "{}: {}".format(add[0], add[1])
|
normal
|
{
"blob_id": "2ecd234753fabbca2829dc86db2f740e371e4ea7",
"index": 6499,
"step-1": "\n# coding: utf-8\n\n# # Configuration\n\n# In[1]:\n\nCONNECTION_STRING = \"mongodb://localhost:27017\"\nDATABASE_NAME = \"off\"\nCOLLECTION_NAME = \"products\"\n\n\n# # MongDB connection\n\n# In[2]:\n\nfrom pymongo import MongoClient\nfrom bson.code import Code\nimport plotly, pymongo\nplotly.offline.init_notebook_mode()\nfrom plotly.graph_objs import Bar\n\nclient = MongoClient(CONNECTION_STRING)\ndb = client[DATABASE_NAME]\nopenfood = db[COLLECTION_NAME]\n\n\n# # Nutrition grade\n\n# In[6]:\n\nmapper = Code(\"\"\"\n function () {\n if (typeof this.nutrition_grades !== 'undefined' && this.nutrition_grades !== \"\"){\n emit(this.nutrition_grades, 1);\n }\n }\"\"\")\nreducer = Code(\"\"\"\n function (key, values) {\n var total = 0;\n for (var i = 0; i < values.length; i++) {\n total += values[i];\n }\n return total;\n }\"\"\")\n\ngrades = openfood.inline_map_reduce(mapper, reducer)\nprint grades\n\n\n# In[14]:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n \nobjects = [item['_id'] for item in grades] # [a,b,c,d,e]\ny_pos = np.arange(len(objects))\ncount = [item['value'] for item in grades]\n \nplt.bar(y_pos, count, align='center', alpha=0.5)\nplt.xticks(y_pos, objects)\nplt.ylabel('Count')\nplt.title('Nutrition Grades')\n \nplt.show()\n\n\n# Each food entry states the countries which the food it is sold. Below, we try to find out the list of countries which the food are sold.\n\n# # Nutrients (100g)\n\n# In[16]:\n\nmapper = Code(\"\"\"\n function () {\n if (typeof this.nutriments !== 'undefined' && this.nutriments !== \"\") {\n for (var key in this.nutriments) {\n if (key.match(/.*100g/))\n emit(key, null);\n }\n }\n }\"\"\")\nreducer = Code(\"\"\"\n function (key, values) {\n return key\n }\"\"\")\n\nnutriments_100g_fields = openfood.inline_map_reduce(mapper, reducer)\nfor n in nutriments_100g_fields:\n print n\n\n\n# In[17]:\n\nfor n in nutriments_100g_fields:\n print n['_id']\n\n\n# # Additives\n\n# In[24]:\n\nmapper = Code(\"\"\"\n function () {\n if (typeof this.additives !== \"undefined\" && this.additives_n >= 0){\n var add = this.additives.substring(3, this.additives.length-3); // remove \"^ [ \" and \" ] $\"\n var add_str = add.split(\" ] [ \");\n for (var i = 0; i < add_str.length; i++){\n var additive_parts = add_str[i].split(\" -> exists -- \");\n if (additive_parts.length == 2){\n var add_code = additive_parts[0].split(\" -> \")[1];\n emit(add_code, 1);\n }\n }\n }\n }\"\"\")\nreducer = Code(\"\"\"\n function (key, values) {\n var total = 0;\n for (var i = 0; i < values.length; i++) {\n total += values[i];\n }\n return total;\n }\"\"\")\n\nadditives_stats = openfood.inline_map_reduce(mapper, reducer)\nprint additives_stats\n\n\n# In[29]:\n\nadd_clean = [(x['value'], x['_id']) for x in additives_stats]\nadd_clean.sort()\n\nprint len(add_clean)\nfor add in add_clean:\n print \"{}: {}\".format(add[0], add[1])\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import torch
import torch.nn as nn
import torch.nn.functional as F

# `device` is used below but never defined in this snippet; define it here so the module runs standalone.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Const. low-rank version
class xCNNlow(torch.nn.Module):
def __init__(self, channels, filters, kernel_size, padding=0, stride=1, groups=1, rank=1, bias=True):
super(xCNNlow, self).__init__()
self.filters = filters
self.times = 2
self.kernel_size = kernel_size
self.channels = channels//groups
self.padding = padding
self.stride = stride
self.biasTrue = bias
self.rank = rank
self.groups = groups
self.conv_weights = nn.Parameter(torch.Tensor(filters//self.times, channels, kernel_size, kernel_size).to(device))
self.column_weights = nn.Parameter(torch.Tensor(filters-filters//self.times, self.rank).to(device))
self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters//self.times).to(device))
        torch.nn.init.xavier_uniform_(self.conv_weights)
self.column_weights.data.uniform_(-0.1, 0.1)
self.row_weights.data.uniform_(-0.1, 0.1)
if self.biasTrue:
self.bias = nn.Parameter(torch.Tensor(filters).to(device))
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input):
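        # Derive the remaining (filters - filters//times) filters as a rank-`rank` linear
        # combination of the explicitly learned base filters, then convolve with the full set.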
self.correlated_weights = torch.mm(self.column_weights, torch.mm(self.row_weights,self.conv_weights.reshape(self.filters//self.times,-1)))\
.reshape(self.filters-self.filters//self.times, self.channels, self.kernel_size, self.kernel_size)
if self.biasTrue:
return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\
bias=self.bias, padding=self.padding, stride=self.stride)
else:
return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\
padding=self.padding, stride=self.stride)
#count FLOPs
def count_op_xCNNlow(m, x, y):
x = x[0]
multiply_adds = 1
cin = m.channels
cout = m.filters
kh, kw = m.kernel_size, m.kernel_size
batch_size = x.size()[0]
out_h = y.size(2)
out_w = y.size(3)
# ops per output element
# kernel_mul = kh * kw * cin
# kernel_add = kh * kw * cin - 1
kernel_ops = multiply_adds * kh * kw
bias_ops = 1 if m.biasTrue is True else 0
ops_per_element = kernel_ops + bias_ops
# total ops
# num_out_elements = y.numel()
output_elements = batch_size * out_w * out_h * cout
conv_ops = output_elements * ops_per_element * cin // m.groups
# per output element
total_mul_1 = m.filters//m.times
total_add_1 = total_mul_1 - 1
num_elements_1 = m.rank * (cin * kh * kw) # (m.filters - m.filters//m.times)
total_mul_2 = m.rank
total_add_2 = total_mul_2 - 1
num_elements_2 = (m.filters - m.filters//m.times) * (cin * kh * kw) # (m.filters - m.filters//m.times)
lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 + total_add_2) * num_elements_2
total_ops = lin_ops + conv_ops
print(lin_ops, conv_ops)
m.total_ops = torch.Tensor([int(total_ops)])
|
normal
|
{
"blob_id": "f714c7006f50379cc7508a13d710d902d38d2d1f",
"index": 425,
"step-1": "<mask token>\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n self.correlated_weights = torch.mm(self.column_weights, torch.mm(\n self.row_weights, self.conv_weights.reshape(self.filters //\n self.times, -1))).reshape(self.filters - self.filters // self.\n times, self.channels, self.kernel_size, self.kernel_size)\n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), bias=self.bias, padding=self.\n padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), padding=self.padding, stride=\n self.stride)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n self.correlated_weights = torch.mm(self.column_weights, torch.mm(\n self.row_weights, self.conv_weights.reshape(self.filters //\n self.times, -1))).reshape(self.filters - self.filters // self.\n times, self.channels, self.kernel_size, self.kernel_size)\n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), bias=self.bias, padding=self.\n padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), padding=self.padding, stride=\n self.stride)\n\n\ndef count_op_xCNNlow(m, x, y):\n x = x[0]\n multiply_adds = 1\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n out_h = y.size(2)\n out_w = y.size(3)\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // m.groups\n total_mul_1 = m.filters // m.times\n total_add_1 = total_mul_1 - 1\n num_elements_1 = m.rank * (cin * kh * kw)\n total_mul_2 = m.rank\n total_add_2 = total_mul_2 - 1\n num_elements_2 = (m.filters - m.filters // m.times) * (cin * kh * kw)\n lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 +\n total_add_2) * num_elements_2\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n m.total_ops = torch.Tensor([int(total_ops)])\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n self.correlated_weights = torch.mm(self.column_weights, torch.mm(\n self.row_weights, self.conv_weights.reshape(self.filters //\n self.times, -1))).reshape(self.filters - self.filters // self.\n times, self.channels, self.kernel_size, self.kernel_size)\n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), bias=self.bias, padding=self.\n padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), padding=self.padding, stride=\n self.stride)\n\n\ndef count_op_xCNNlow(m, x, y):\n x = x[0]\n multiply_adds = 1\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n out_h = y.size(2)\n out_w = y.size(3)\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // m.groups\n total_mul_1 = m.filters // m.times\n total_add_1 = total_mul_1 - 1\n num_elements_1 = m.rank * (cin * kh * kw)\n total_mul_2 = m.rank\n total_add_2 = total_mul_2 - 1\n num_elements_2 = (m.filters - m.filters // m.times) * (cin * kh * kw)\n lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 +\n total_add_2) * num_elements_2\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n m.total_ops = torch.Tensor([int(total_ops)])\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Const. low-rank version\nclass xCNNlow(torch.nn.Module):\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1, groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels//groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n\n self.conv_weights = nn.Parameter(torch.Tensor(filters//self.times, channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters-filters//self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters//self.times).to(device))\n \n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n \n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input): \n self.correlated_weights = torch.mm(self.column_weights, torch.mm(self.row_weights,self.conv_weights.reshape(self.filters//self.times,-1)))\\\n .reshape(self.filters-self.filters//self.times, self.channels, self.kernel_size, self.kernel_size) \n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n bias=self.bias, padding=self.padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n padding=self.padding, stride=self.stride)\n\n\n#count FLOPs\ndef count_op_xCNNlow(m, x, y):\n x = x[0]\n\n multiply_adds = 1\n\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n\n out_h = y.size(2)\n out_w = y.size(3)\n\n # ops per output element\n # kernel_mul = kh * kw * cin\n # kernel_add = kh * kw * cin - 1\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n\n # total ops\n # num_out_elements = y.numel()\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // m.groups\n\n # per output element\n total_mul_1 = m.filters//m.times\n total_add_1 = total_mul_1 - 1\n num_elements_1 = m.rank * (cin * kh * kw) # (m.filters - m.filters//m.times)\n total_mul_2 = m.rank\n total_add_2 = total_mul_2 - 1\n num_elements_2 = (m.filters - m.filters//m.times) * (cin * kh * kw) # (m.filters - m.filters//m.times)\n lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 + total_add_2) * num_elements_2\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
def weight_init(layers):
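    # BatchNorm layers are reset to identity (weight=1, bias=0); Linear layers get
    # uniform(-1/sqrt(fan_in), 1/sqrt(fan_in)) weights and zero bias.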
for layer in layers:
if isinstance(layer, nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
elif isinstance(layer, nn.Linear):
n = layer.in_features
y = 1.0 / np.sqrt(n)
layer.weight.data.uniform_(-y, y)
layer.bias.data.fill_(0)
# nn.init.kaiming_normal_(layer.weight.data, nonlinearity='relu')
# Traditional click-through-rate (CTR) prediction model
class LR(nn.Module):
def __init__(self,
feature_nums,
output_dim = 1):
super(LR, self).__init__()
self.linear = nn.Linear(feature_nums, output_dim)
self.bias = nn.Parameter(torch.zeros((output_dim,)))
def forward(self, x):
"""
:param x: Int tensor of size (batch_size, feature_nums, latent_nums)
:return: pctrs
"""
out = self.bias + torch.sum(self.linear(x), dim=1)
return out.unsqueeze(1)
class RNN(nn.Module):
def __init__(self,
feature_nums,
hidden_dims,
bi_lstm,
out_dims=1):
super(RNN, self).__init__()
        self.feature_nums = feature_nums  # input feature dimensionality
        self.hidden_dims = hidden_dims  # hidden state dimensionality
        self.bi_lstm = bi_lstm  # number of stacked LSTM layers
        self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lstm)
self.out = nn.Linear(self.hidden_dims, out_dims)
def forward(self,x):
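        # x1 holds the per-timestep LSTM outputs of shape (seq_len, batch, hidden_dims);
        # flatten it, apply the linear head, then restore the (seq_len, batch, out_dims) shape.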
x1, _ = self.lstm(x)
a, b, c = x1.shape
out = self.out(x1.view(-1, c))
out1 = out.view(a, b, -1)
return out1
class MLP(nn.Module):
def __init__(self,
feature_nums,
neuron_nums,
dropout_rate,
output_dim=1):
super(MLP, self).__init__()
self.feature_nums = feature_nums
self.neuron_nums = neuron_nums
self.dropout_rate = dropout_rate
deep_input_dims = self.feature_nums
layers = list()
neuron_nums = self.neuron_nums
for neuron_num in neuron_nums:
layers.append(nn.Linear(deep_input_dims, neuron_num))
# layers.append(nn.BatchNorm1d(neuron_num))
layers.append(nn.ReLU())
            layers.append(nn.Dropout(p=self.dropout_rate))  # use the configured dropout rate instead of a hard-coded 0.2
deep_input_dims = neuron_num
weight_init(layers)
layers.append(nn.Linear(deep_input_dims, output_dim))
self.mlp = nn.Sequential(*layers)
def forward(self, x):
"""
:param x: Int tensor of size (batch_size, feature_nums, latent_nums)
:return: pctrs
"""
out = self.mlp(x)
return out
|
normal
|
{
"blob_id": "2c2b075f9ea9e8d6559e44ad09d3e7767c48205e",
"index": 6772,
"step-1": "<mask token>\n\n\nclass LR(nn.Module):\n <mask token>\n <mask token>\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n return out\n",
"step-2": "<mask token>\n\n\nclass LR(nn.Module):\n\n def __init__(self, feature_nums, output_dim=1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n return out\n",
"step-3": "<mask token>\n\n\ndef weight_init(layers):\n for layer in layers:\n if isinstance(layer, nn.BatchNorm1d):\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n elif isinstance(layer, nn.Linear):\n n = layer.in_features\n y = 1.0 / np.sqrt(n)\n layer.weight.data.uniform_(-y, y)\n layer.bias.data.fill_(0)\n\n\nclass LR(nn.Module):\n\n def __init__(self, feature_nums, output_dim=1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n return out\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nimport numpy as np\n\n\ndef weight_init(layers):\n for layer in layers:\n if isinstance(layer, nn.BatchNorm1d):\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n elif isinstance(layer, nn.Linear):\n n = layer.in_features\n y = 1.0 / np.sqrt(n)\n layer.weight.data.uniform_(-y, y)\n layer.bias.data.fill_(0)\n\n\nclass LR(nn.Module):\n\n def __init__(self, feature_nums, output_dim=1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n return out\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\n\nimport numpy as np\n\ndef weight_init(layers):\n for layer in layers:\n if isinstance(layer, nn.BatchNorm1d):\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n elif isinstance(layer, nn.Linear):\n n = layer.in_features\n y = 1.0 / np.sqrt(n)\n layer.weight.data.uniform_(-y, y)\n layer.bias.data.fill_(0)\n # nn.init.kaiming_normal_(layer.weight.data, nonlinearity='relu')\n\n# 传统的预测点击率模型\nclass LR(nn.Module):\n def __init__(self,\n feature_nums,\n output_dim = 1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n def __init__(self,\n feature_nums,\n hidden_dims,\n bi_lstm,\n out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums # 输入数据特征维度\n self.hidden_dims = hidden_dims # 隐藏层维度\n self.bi_lism = bi_lstm # LSTM串联数量\n\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self,x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n\n return out1\n\nclass MLP(nn.Module):\n def __init__(self,\n feature_nums,\n neuron_nums,\n dropout_rate,\n output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n\n deep_input_dims = self.feature_nums\n\n layers = list()\n\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n # layers.append(nn.BatchNorm1d(neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n\n weight_init(layers)\n\n layers.append(nn.Linear(deep_input_dims, output_dim))\n\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n\n return out",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
# _*_ coding: utf-8 _*_
from service import service_logger
from service.TaskService import TaskService
class ApiException(Exception):
def __init__(self, message, code=400, data=None):
Exception.__init__(self, message)
self.code = code
self.msg = message
self.data = data
def __str__(self):
return self.msg
def to_dict(self):
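        # Serialize the exception (optional payload plus msg/code) for a JSON error response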
res = dict(self.data or ())
res['msg'] = self.msg
res['code'] = self.code
return res
def error_handle(msg='', data=None):
service_logger.error(data={"msg": msg, "data": data})
raise ApiException(msg)
|
normal
|
{
"blob_id": "0ac14b023c51bfd1cf99bd2d991baa30a671e066",
"index": 9994,
"step-1": "<mask token>\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n\n def to_dict(self):\n res = dict(self.data or ())\n res['msg'] = self.msg\n res['code'] = self.code\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n\n def to_dict(self):\n res = dict(self.data or ())\n res['msg'] = self.msg\n res['code'] = self.code\n return res\n\n\ndef error_handle(msg='', data=None):\n service_logger.error(data={'msg': msg, 'data': data})\n raise ApiException(msg)\n",
"step-4": "from service import service_logger\nfrom service.TaskService import TaskService\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n\n def to_dict(self):\n res = dict(self.data or ())\n res['msg'] = self.msg\n res['code'] = self.code\n return res\n\n\ndef error_handle(msg='', data=None):\n service_logger.error(data={'msg': msg, 'data': data})\n raise ApiException(msg)\n",
"step-5": "# _*_ coding: utf-8 _*_\r\nfrom service import service_logger\r\nfrom service.TaskService import TaskService\r\n\r\nclass ApiException(Exception):\r\n\r\n def __init__(self, message, code=400, data=None):\r\n Exception.__init__(self, message)\r\n\r\n self.code = code\r\n self.msg = message\r\n self.data = data\r\n\r\n def __str__(self):\r\n return self.msg\r\n\r\n def to_dict(self):\r\n res = dict(self.data or ())\r\n res['msg'] = self.msg\r\n res['code'] = self.code\r\n\r\n return res\r\n\r\n\r\ndef error_handle(msg='', data=None):\r\n service_logger.error(data={\"msg\": msg, \"data\": data})\r\n raise ApiException(msg)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import unittest
import numpy
import set_solver
class TestSets(unittest.TestCase):
def test_is_set(self):
"""Test set validator (Exercise 3a)."""
cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,
2], [0, 1, 2, 2, 2]])
self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))
self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))
self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))
def test_find_sets(self):
"""Test solver (Exercise 3b)."""
cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,
2], [0, 1, 2, 2, 2]])
set_indices = set_solver.find_sets(cards)
self.assertEqual(len(set_indices), 2)
self.assertTrue((0, 1, 2) in set_indices)
self.assertTrue((2, 3, 4) in set_indices)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "6065fae2a11f6b525ef10346e297505ec9d4e9d5",
"index": 8550,
"step-1": "<mask token>\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n\n def test_find_sets(self):\n \"\"\"Test solver (Exercise 3b).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n\n def test_find_sets(self):\n \"\"\"Test solver (Exercise 3b).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport numpy\nimport set_solver\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n\n def test_find_sets(self):\n \"\"\"Test solver (Exercise 3b).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# Generated by Django 3.1.6 on 2021-04-22 07:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0004_project_is_featured'),
]
operations = [
migrations.AlterField(
model_name='project',
name='pin_id',
field=models.CharField(max_length=20, null=True, unique=True),
),
]
|
normal
|
{
"blob_id": "24ed29dfaaf7ce508b2d80740bad1304b291c596",
"index": 8466,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0004_project_is_featured')]\n operations = [migrations.AlterField(model_name='project', name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0004_project_is_featured')]\n operations = [migrations.AlterField(model_name='project', name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True))]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-04-22 07:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0004_project_is_featured'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelBinarizer
from ann.act import relu, softmax_with_xentropy
from ann.loss import xentropy_with_softmax
from ann.opt import SGD, RMSprop, Adam, SGDM
from ann.sklearn import NetworkClassifier, FC
# set seeds
random.seed(42)
np.random.seed(42)
# prepare data
mnist = fetch_mldata('MNIST original')
x = mnist.data
y = LabelBinarizer().fit_transform(mnist.target.astype(int))
x_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000, stratify=y)
# normalize input
scaler = StandardScaler(copy=False)
x_train = scaler.fit_transform(x_train)
x_dev = scaler.transform(x_dev)
# define configurations
configs = []
net1 = NetworkClassifier(layers=[
FC(n_in=x.shape[1], n_out=256, act=relu),
FC(n_in=256, n_out=10, act=softmax_with_xentropy)
])
opt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)
configs.append(("SGD", net1, opt1))
net2 = net1.clone()
opt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)
configs.append(("SGDM", net2, opt2))
net3 = net1.clone()
opt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)
configs.append(("RMSprop", net3, opt3))
net4 = net1.clone()
opt4 = Adam(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)
configs.append(("Adam", net4, opt4))
# --- you can add other configurations here ---
# define training procedure
epochs = 10
early_stop_patience = 200
# train networks
results = []
for _, net, opt in configs:
res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=y_dev, track_loss=True,
early_stop_pat=early_stop_patience, verbose=1)
results.append(res)
def plot(ax, ls_batch, ls_dev, its, title):
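    # Plot the batch-loss and dev-loss curves on one axis and annotate the final losses and iteration count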
ax.plot(range(len(ls_batch)), ls_batch, label="Batch")
ax.plot(range(len(ls_dev)), ls_dev, label="Dev")
ax.text(0.3, 0.93, "Batch: {:.3f}".format(ls_batch[-1]), transform=ax.transAxes)
ax.text(0.3, 0.86, "Dev: {:.3f}".format(ls_dev[-1]), transform=ax.transAxes)
ax.text(0.3, 0.79, "Its: {}".format(its), transform=ax.transAxes)
ax.set_xlabel("Iterations")
ax.set_ylabel("Loss")
ax.set_title(title)
ax.legend(loc="upper right")
# plot results
# np.int was removed in recent NumPy releases; use the builtin int instead
rows = np.sqrt(len(configs)).astype(int)
cols = np.ceil(len(configs) / rows).astype(int)
plt.figure(figsize=(4 * cols, 4 * rows))
last_ax = None
for i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(configs, results)):
ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)
if len(ls_batch) > 0:
plot(ax, ls_batch, ls_dev, its, title)
else:
print("Warning: Config {} did not return any results".format(title))
last_ax = ax
plt.tight_layout()
plt.show()
|
normal
|
{
"blob_id": "2f6e5ed4e2d52190551dec2ac18441b8355699b5",
"index": 7096,
"step-1": "<mask token>\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\n<mask token>\n",
"step-2": "<mask token>\nrandom.seed(42)\nnp.random.seed(42)\n<mask token>\nconfigs.append(('SGD', net1, opt1))\n<mask token>\nconfigs.append(('SGDM', net2, opt2))\n<mask token>\nconfigs.append(('RMSprop', net3, opt3))\n<mask token>\nconfigs.append(('Adam', net4, opt4))\n<mask token>\nfor _, net, opt in configs:\n res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=\n y_dev, track_loss=True, early_stop_pat=early_stop_patience, verbose=1)\n results.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\n<mask token>\nplt.figure(figsize=(4 * cols, 4 * rows))\n<mask token>\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(\n configs, results)):\n ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n if len(ls_batch) > 0:\n plot(ax, ls_batch, ls_dev, its, title)\n else:\n print('Warning: Config {} did not return any results'.format(title))\n last_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\nrandom.seed(42)\nnp.random.seed(42)\nmnist = fetch_mldata('MNIST original')\nx = mnist.data\ny = LabelBinarizer().fit_transform(mnist.target.astype(int))\nx_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000,\n stratify=y)\nscaler = StandardScaler(copy=False)\nx_train = scaler.fit_transform(x_train)\nx_dev = scaler.transform(x_dev)\nconfigs = []\nnet1 = NetworkClassifier(layers=[FC(n_in=x.shape[1], n_out=256, act=relu),\n FC(n_in=256, n_out=10, act=softmax_with_xentropy)])\nopt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGD', net1, opt1))\nnet2 = net1.clone()\nopt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGDM', net2, opt2))\nnet3 = net1.clone()\nopt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('RMSprop', net3, opt3))\nnet4 = net1.clone()\nopt4 = Adam(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('Adam', net4, opt4))\nepochs = 10\nearly_stop_patience = 200\nresults = []\nfor _, net, opt in configs:\n res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=\n y_dev, track_loss=True, early_stop_pat=early_stop_patience, verbose=1)\n results.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\nrows = np.sqrt(len(configs)).astype(np.int)\ncols = np.ceil(len(configs) / rows).astype(np.int)\nplt.figure(figsize=(4 * cols, 4 * rows))\nlast_ax = None\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(\n configs, results)):\n ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n if len(ls_batch) > 0:\n plot(ax, ls_batch, ls_dev, its, title)\n else:\n print('Warning: Config {} did not return any results'.format(title))\n last_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-4": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, LabelBinarizer\nfrom ann.act import relu, softmax_with_xentropy\nfrom ann.loss import xentropy_with_softmax\nfrom ann.opt import SGD, RMSprop, Adam, SGDM\nfrom ann.sklearn import NetworkClassifier, FC\nrandom.seed(42)\nnp.random.seed(42)\nmnist = fetch_mldata('MNIST original')\nx = mnist.data\ny = LabelBinarizer().fit_transform(mnist.target.astype(int))\nx_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000,\n stratify=y)\nscaler = StandardScaler(copy=False)\nx_train = scaler.fit_transform(x_train)\nx_dev = scaler.transform(x_dev)\nconfigs = []\nnet1 = NetworkClassifier(layers=[FC(n_in=x.shape[1], n_out=256, act=relu),\n FC(n_in=256, n_out=10, act=softmax_with_xentropy)])\nopt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGD', net1, opt1))\nnet2 = net1.clone()\nopt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append(('SGDM', net2, opt2))\nnet3 = net1.clone()\nopt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('RMSprop', net3, opt3))\nnet4 = net1.clone()\nopt4 = Adam(loss=xentropy_with_softmax, lr=1e-08, batch_size=64)\nconfigs.append(('Adam', net4, opt4))\nepochs = 10\nearly_stop_patience = 200\nresults = []\nfor _, net, opt in configs:\n res = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=\n y_dev, track_loss=True, early_stop_pat=early_stop_patience, verbose=1)\n results.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n ax.plot(range(len(ls_batch)), ls_batch, label='Batch')\n ax.plot(range(len(ls_dev)), ls_dev, label='Dev')\n ax.text(0.3, 0.93, 'Batch: {:.3f}'.format(ls_batch[-1]), transform=ax.\n transAxes)\n ax.text(0.3, 0.86, 'Dev: {:.3f}'.format(ls_dev[-1]), transform=ax.transAxes\n )\n ax.text(0.3, 0.79, 'Its: {}'.format(its), transform=ax.transAxes)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('Loss')\n ax.set_title(title)\n ax.legend(loc='upper right')\n\n\nrows = np.sqrt(len(configs)).astype(np.int)\ncols = np.ceil(len(configs) / rows).astype(np.int)\nplt.figure(figsize=(4 * cols, 4 * rows))\nlast_ax = None\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(\n configs, results)):\n ax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n if len(ls_batch) > 0:\n plot(ax, ls_batch, ls_dev, its, title)\n else:\n print('Warning: Config {} did not return any results'.format(title))\n last_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-5": "import random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, LabelBinarizer\n\nfrom ann.act import relu, softmax_with_xentropy\nfrom ann.loss import xentropy_with_softmax\nfrom ann.opt import SGD, RMSprop, Adam, SGDM\nfrom ann.sklearn import NetworkClassifier, FC\n\n# set seeds\nrandom.seed(42)\nnp.random.seed(42)\n\n# prepare data\nmnist = fetch_mldata('MNIST original')\nx = mnist.data\ny = LabelBinarizer().fit_transform(mnist.target.astype(int))\nx_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=1000, stratify=y)\n# normalize input\nscaler = StandardScaler(copy=False)\nx_train = scaler.fit_transform(x_train)\nx_dev = scaler.transform(x_dev)\n\n# define configurations\nconfigs = []\n\nnet1 = NetworkClassifier(layers=[\n\tFC(n_in=x.shape[1], n_out=256, act=relu),\n\tFC(n_in=256, n_out=10, act=softmax_with_xentropy)\n])\nopt1 = SGD(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append((\"SGD\", net1, opt1))\n\nnet2 = net1.clone()\nopt2 = SGDM(loss=xentropy_with_softmax, lr=0.001, batch_size=64)\nconfigs.append((\"SGDM\", net2, opt2))\n\nnet3 = net1.clone()\nopt3 = RMSprop(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)\nconfigs.append((\"RMSprop\", net3, opt3))\n\nnet4 = net1.clone()\nopt4 = Adam(loss=xentropy_with_softmax, lr=1e-8, batch_size=64)\nconfigs.append((\"Adam\", net4, opt4))\n\n# --- you can add other configurations here ---\n\n# define training procedure\nepochs = 10\nearly_stop_patience = 200\n\n# train networks\nresults = []\nfor _, net, opt in configs:\n\tres = opt.optimize(net, x_train, y_train, epochs, x_dev=x_dev, y_dev=y_dev, track_loss=True,\n\t\t\t\t\t early_stop_pat=early_stop_patience, verbose=1)\n\tresults.append(res)\n\n\ndef plot(ax, ls_batch, ls_dev, its, title):\n\tax.plot(range(len(ls_batch)), ls_batch, label=\"Batch\")\n\tax.plot(range(len(ls_dev)), ls_dev, label=\"Dev\")\n\tax.text(0.3, 0.93, \"Batch: {:.3f}\".format(ls_batch[-1]), transform=ax.transAxes)\n\tax.text(0.3, 0.86, \"Dev: {:.3f}\".format(ls_dev[-1]), transform=ax.transAxes)\n\tax.text(0.3, 0.79, \"Its: {}\".format(its), transform=ax.transAxes)\n\tax.set_xlabel(\"Iterations\")\n\tax.set_ylabel(\"Loss\")\n\tax.set_title(title)\n\tax.legend(loc=\"upper right\")\n\n\n# plot results\nrows = np.sqrt(len(configs)).astype(np.int)\ncols = np.ceil(len(configs) / rows).astype(np.int)\nplt.figure(figsize=(4 * cols, 4 * rows))\nlast_ax = None\nfor i, ((title, net, opt), (ls_batch, ls_dev, its)) in enumerate(zip(configs, results)):\n\tax = plt.subplot(rows, cols, i + 1, sharex=last_ax, sharey=last_ax)\n\tif len(ls_batch) > 0:\n\t\tplot(ax, ls_batch, ls_dev, its, title)\n\telse:\n\t\tprint(\"Warning: Config {} did not return any results\".format(title))\n\tlast_ax = ax\nplt.tight_layout()\nplt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
import folium
ctx = '../data/'
json = ctx + 'us-states.json'
csv = ctx + 'US_Unemployment_Oct2012.csv'
data = pd.read_csv(csv)
m = folium.Map(location=[37, -102], zoom_start=5)
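# Join the unemployment CSV to the US-states GeoJSON on the state id and color each state by its rate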
m.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',
    'Unemployment'], key_on='feature.id', fill_color='YlGn', fill_opacity=
    0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')
folium.LayerControl().add_to(m)
m.save(ctx + 'result.html')
|
normal
|
{
"blob_id": "382cb55a6b849f0240276d8f45746e995b16d714",
"index": 4455,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nm.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',\n 'Unemployment'], Key_on='feature.id', fill_color='YlGn', fill_opacity=\n 0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')\nfolium.LayerControl().add_to(m)\nm.save(ctx + 'result.html')\n",
"step-3": "<mask token>\nctx = '../data/'\njson = ctx + 'us-states.json'\ncsv = ctx + 'US_Unemployment_Oct2012.csv'\ndata = pd.read_csv(csv)\nm = folium.Map(location=[37, -102], zoom_start=5)\nm.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',\n 'Unemployment'], Key_on='feature.id', fill_color='YlGn', fill_opacity=\n 0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')\nfolium.LayerControl().add_to(m)\nm.save(ctx + 'result.html')\n",
"step-4": "import pandas as pd\nimport folium\nctx = '../data/'\njson = ctx + 'us-states.json'\ncsv = ctx + 'US_Unemployment_Oct2012.csv'\ndata = pd.read_csv(csv)\nm = folium.Map(location=[37, -102], zoom_start=5)\nm.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',\n 'Unemployment'], Key_on='feature.id', fill_color='YlGn', fill_opacity=\n 0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')\nfolium.LayerControl().add_to(m)\nm.save(ctx + 'result.html')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Copyright 2017 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test SummaryModel objects."""
from oslotest import base
from cloudkitty.api.v1.datamodels import report
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None,
end=None,
tenant_id=None,
res_type=None,
rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, "ALL")
self.assertEqual(s.res_type, "ALL")
self.assertEqual(s.rate, "0")
|
normal
|
{
"blob_id": "0ea67ac97ec8e7f287a2430c67f8f7d841d8b646",
"index": 813,
"step-1": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None, end=None, tenant_id=None,\n res_type=None, rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, 'ALL')\n self.assertEqual(s.res_type, 'ALL')\n self.assertEqual(s.rate, '0')\n",
"step-4": "<mask token>\nfrom oslotest import base\nfrom cloudkitty.api.v1.datamodels import report\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None, end=None, tenant_id=None,\n res_type=None, rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, 'ALL')\n self.assertEqual(s.res_type, 'ALL')\n self.assertEqual(s.rate, '0')\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright 2017 Objectif Libre\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\"\"\"Test SummaryModel objects.\"\"\"\nfrom oslotest import base\n\nfrom cloudkitty.api.v1.datamodels import report\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None,\n end=None,\n tenant_id=None,\n res_type=None,\n rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, \"ALL\")\n self.assertEqual(s.res_type, \"ALL\")\n self.assertEqual(s.rate, \"0\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
from sklearn import model_selection
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
"""
- binary cross-validate
- multi-class cross-validate
- multi-label cross-validate
- holdout
- regression
"""
class CrossValidate(object):
def __init__(self, df,
target_cols,
problem_type,
num_folds = 3,
shuffle = False,
random_state = 0):
"""
df - pandas dataframe
target_cols - list of targets
problem_type - ["binary", "multiclass", holdout_n, multilabel]
"""
self.dataframe = df
self.target_cols = target_cols
self.num_targets = len(target_cols)
self.problem_type = problem_type
self.shuffle = shuffle
self.num_folds = num_folds
self.random_state = random_state
if self.shuffle:
self.dataframe = self.dataframe.sample(frac = 1,
random_state = self.random_state).reset_index(drop = True)
self.dataframe["kfold"] = -1
def split(self):
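        # Fill the 'kfold' column according to problem_type and return the annotated dataframe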
if self.problem_type in ("binary", "multiclass"):
"""
target_cols - ['target_1']
unique_values - eg, [0, 1] for binary, [0, 1, 2,...] for multiclass
"""
if self.num_targets != 1:
raise Exception("Invalid number of targets for this problem type. \
Needed number of targets = 1")
target = self.target_cols[0]
unique_values = self.dataframe[target].nunique()
if unique_values == 1:
raise Exception("Only one unique value found! \
Must be two for Binary and Multiclass cross validation")
elif unique_values > 1:
kf = model_selection.StratifiedKFold(n_splits=self.num_folds,
shuffle = False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe,
y=self.dataframe[target].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type == "multilabel":
"""
target_cols - ['target_1', 'target_2', 'target_3',....]
"""
if self.num_targets < 1:
raise Exception("Invalid number of targets for this problem type. \
Must be greater than 1.")
kf = MultilabelStratifiedKFold(n_splits=self.num_folds,
shuffle = False)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe,
y=self.dataframe[self.target_cols].values)):
self.dataframe.loc[val_idx, 'kfold'] = fold
        elif self.problem_type == "regression":  # ("regression") is a plain string, so 'in' would do a substring check
kf = model_selection.KFold(n_splits=self.num_folds)
for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe)):
self.dataframe.loc[val_idx, 'kfold'] = fold
elif self.problem_type.startswith("holdout_"):
"""
1 : Training Set
0 : Validation Set
holdout_n : n% to holdout
"""
holdout_percentage = int(self.problem_type.split("_")[1])
num_holdout_samples = int(len(self.dataframe) * holdout_percentage / 100)
self.dataframe.loc[:len(self.dataframe) - num_holdout_samples, "kfold"] = 0
self.dataframe.loc[len(self.dataframe) - num_holdout_samples:, "kfold"] = 1
else:
raise Exception("Problem type not understood!")
return self.dataframe
|
normal
|
{
"blob_id": "0dad1937df39c012f7991c3897f27964bed1d5a0",
"index": 1533,
"step-1": "<mask token>\n\n\nclass CrossValidate(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CrossValidate(object):\n\n def __init__(self, df, target_cols, problem_type, num_folds=3, shuffle=\n False, random_state=0):\n \"\"\"\n df - pandas dataframe\n target_cols - list of targets\n problem_type - [\"binary\", \"multiclass\", holdout_n, multilabel]\n \"\"\"\n self.dataframe = df\n self.target_cols = target_cols\n self.num_targets = len(target_cols)\n self.problem_type = problem_type\n self.shuffle = shuffle\n self.num_folds = num_folds\n self.random_state = random_state\n if self.shuffle:\n self.dataframe = self.dataframe.sample(frac=1, random_state=\n self.random_state).reset_index(drop=True)\n self.dataframe['kfold'] = -1\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CrossValidate(object):\n\n def __init__(self, df, target_cols, problem_type, num_folds=3, shuffle=\n False, random_state=0):\n \"\"\"\n df - pandas dataframe\n target_cols - list of targets\n problem_type - [\"binary\", \"multiclass\", holdout_n, multilabel]\n \"\"\"\n self.dataframe = df\n self.target_cols = target_cols\n self.num_targets = len(target_cols)\n self.problem_type = problem_type\n self.shuffle = shuffle\n self.num_folds = num_folds\n self.random_state = random_state\n if self.shuffle:\n self.dataframe = self.dataframe.sample(frac=1, random_state=\n self.random_state).reset_index(drop=True)\n self.dataframe['kfold'] = -1\n\n def split(self):\n if self.problem_type in ('binary', 'multiclass'):\n \"\"\"\n target_cols - ['target_1']\n unique_values - eg, [0, 1] for binary, [0, 1, 2,...] for multiclass\n \"\"\"\n if self.num_targets != 1:\n raise Exception(\n 'Invalid number of targets for this problem type. Needed number of targets = 1'\n )\n target = self.target_cols[0]\n unique_values = self.dataframe[target].nunique()\n if unique_values == 1:\n raise Exception(\n 'Only one unique value found! Must be two for Binary and Multiclass cross validation'\n )\n elif unique_values > 1:\n kf = model_selection.StratifiedKFold(n_splits=self.\n num_folds, shuffle=False)\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self\n .dataframe, y=self.dataframe[target].values)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n elif self.problem_type == 'multilabel':\n \"\"\"\n target_cols - ['target_1', 'target_2', 'target_3',....]\n \"\"\"\n if self.num_targets < 1:\n raise Exception(\n 'Invalid number of targets for this problem type. Must be greater than 1.'\n )\n kf = MultilabelStratifiedKFold(n_splits=self.num_folds, shuffle\n =False)\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.\n dataframe, y=self.dataframe[self.target_cols].values)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n elif self.problem_type in 'regression':\n kf = model_selection.KFold(n_splits=self.num_folds)\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.\n dataframe)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n elif self.problem_type.startswith('holdout_'):\n \"\"\"\n 1 : Training Set\n 0 : Validation Set\n holdout_n : n% to holdout\n \"\"\"\n holdout_percentage = int(self.problem_type.split('_')[1])\n num_holdout_samples = int(len(self.dataframe) *\n holdout_percentage / 100)\n self.dataframe.loc[:len(self.dataframe) - num_holdout_samples,\n 'kfold'] = 0\n self.dataframe.loc[len(self.dataframe) - num_holdout_samples:,\n 'kfold'] = 1\n else:\n raise Exception('Problem type not understood!')\n return self.dataframe\n",
"step-4": "import numpy as np\nfrom sklearn import model_selection\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold\n<mask token>\n\n\nclass CrossValidate(object):\n\n def __init__(self, df, target_cols, problem_type, num_folds=3, shuffle=\n False, random_state=0):\n \"\"\"\n df - pandas dataframe\n target_cols - list of targets\n problem_type - [\"binary\", \"multiclass\", holdout_n, multilabel]\n \"\"\"\n self.dataframe = df\n self.target_cols = target_cols\n self.num_targets = len(target_cols)\n self.problem_type = problem_type\n self.shuffle = shuffle\n self.num_folds = num_folds\n self.random_state = random_state\n if self.shuffle:\n self.dataframe = self.dataframe.sample(frac=1, random_state=\n self.random_state).reset_index(drop=True)\n self.dataframe['kfold'] = -1\n\n def split(self):\n if self.problem_type in ('binary', 'multiclass'):\n \"\"\"\n target_cols - ['target_1']\n unique_values - eg, [0, 1] for binary, [0, 1, 2,...] for multiclass\n \"\"\"\n if self.num_targets != 1:\n raise Exception(\n 'Invalid number of targets for this problem type. Needed number of targets = 1'\n )\n target = self.target_cols[0]\n unique_values = self.dataframe[target].nunique()\n if unique_values == 1:\n raise Exception(\n 'Only one unique value found! Must be two for Binary and Multiclass cross validation'\n )\n elif unique_values > 1:\n kf = model_selection.StratifiedKFold(n_splits=self.\n num_folds, shuffle=False)\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self\n .dataframe, y=self.dataframe[target].values)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n elif self.problem_type == 'multilabel':\n \"\"\"\n target_cols - ['target_1', 'target_2', 'target_3',....]\n \"\"\"\n if self.num_targets < 1:\n raise Exception(\n 'Invalid number of targets for this problem type. Must be greater than 1.'\n )\n kf = MultilabelStratifiedKFold(n_splits=self.num_folds, shuffle\n =False)\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.\n dataframe, y=self.dataframe[self.target_cols].values)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n elif self.problem_type in 'regression':\n kf = model_selection.KFold(n_splits=self.num_folds)\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.\n dataframe)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n elif self.problem_type.startswith('holdout_'):\n \"\"\"\n 1 : Training Set\n 0 : Validation Set\n holdout_n : n% to holdout\n \"\"\"\n holdout_percentage = int(self.problem_type.split('_')[1])\n num_holdout_samples = int(len(self.dataframe) *\n holdout_percentage / 100)\n self.dataframe.loc[:len(self.dataframe) - num_holdout_samples,\n 'kfold'] = 0\n self.dataframe.loc[len(self.dataframe) - num_holdout_samples:,\n 'kfold'] = 1\n else:\n raise Exception('Problem type not understood!')\n return self.dataframe\n",
"step-5": "import numpy as np\nfrom sklearn import model_selection\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold\n\n\"\"\"\n- binary cross-validate\n- multi-class cross-validate\n- multi-label cross-validate\n- holdout\n- regression\n\"\"\"\n\nclass CrossValidate(object):\n def __init__(self, df,\n target_cols,\n problem_type,\n num_folds = 3,\n shuffle = False,\n random_state = 0):\n \"\"\"\n df - pandas dataframe\n target_cols - list of targets\n problem_type - [\"binary\", \"multiclass\", holdout_n, multilabel]\n \"\"\"\n\n self.dataframe = df\n self.target_cols = target_cols\n self.num_targets = len(target_cols)\n self.problem_type = problem_type\n self.shuffle = shuffle\n self.num_folds = num_folds\n self.random_state = random_state\n\n if self.shuffle:\n self.dataframe = self.dataframe.sample(frac = 1,\n random_state = self.random_state).reset_index(drop = True)\n\n self.dataframe[\"kfold\"] = -1\n\n def split(self):\n if self.problem_type in (\"binary\", \"multiclass\"):\n \"\"\"\n target_cols - ['target_1']\n unique_values - eg, [0, 1] for binary, [0, 1, 2,...] for multiclass\n \"\"\"\n if self.num_targets != 1:\n raise Exception(\"Invalid number of targets for this problem type. \\\n Needed number of targets = 1\")\n target = self.target_cols[0]\n unique_values = self.dataframe[target].nunique()\n\n if unique_values == 1:\n raise Exception(\"Only one unique value found! \\\n Must be two for Binary and Multiclass cross validation\")\n elif unique_values > 1:\n kf = model_selection.StratifiedKFold(n_splits=self.num_folds,\n shuffle = False)\n\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe,\n y=self.dataframe[target].values)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n\n elif self.problem_type == \"multilabel\":\n \"\"\"\n target_cols - ['target_1', 'target_2', 'target_3',....]\n \"\"\"\n if self.num_targets < 1:\n raise Exception(\"Invalid number of targets for this problem type. \\\n Must be greater than 1.\")\n\n kf = MultilabelStratifiedKFold(n_splits=self.num_folds,\n shuffle = False)\n\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe,\n y=self.dataframe[self.target_cols].values)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n\n elif self.problem_type in (\"regression\"):\n kf = model_selection.KFold(n_splits=self.num_folds)\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=self.dataframe)):\n self.dataframe.loc[val_idx, 'kfold'] = fold\n\n elif self.problem_type.startswith(\"holdout_\"):\n \"\"\"\n 1 : Training Set\n 0 : Validation Set\n holdout_n : n% to holdout\n \"\"\"\n holdout_percentage = int(self.problem_type.split(\"_\")[1])\n num_holdout_samples = int(len(self.dataframe) * holdout_percentage / 100)\n self.dataframe.loc[:len(self.dataframe) - num_holdout_samples, \"kfold\"] = 0\n self.dataframe.loc[len(self.dataframe) - num_holdout_samples:, \"kfold\"] = 1\n\n else:\n raise Exception(\"Problem type not understood!\")\n\n return self.dataframe\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-12 20:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0010_auto_20170512_2248'),
]
operations = [
migrations.AlterField(
model_name='classroom',
name='subject5teacher',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Teacher', verbose_name='Chemistry'),
),
]
|
normal
|
{
"blob_id": "438efbaf35401a29ea5408fee3b49b85f237760e",
"index": 1089,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home', '0010_auto_20170512_2248')]\n operations = [migrations.AlterField(model_name='classroom', name=\n 'subject5teacher', field=models.ForeignKey(default=None, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to=\n 'home.Teacher', verbose_name='Chemistry'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home', '0010_auto_20170512_2248')]\n operations = [migrations.AlterField(model_name='classroom', name=\n 'subject5teacher', field=models.ForeignKey(default=None, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to=\n 'home.Teacher', verbose_name='Chemistry'))]\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# Generated by Django 1.11 on 2017-05-12 20:48\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('home', '0010_auto_20170512_2248'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='classroom',\r\n name='subject5teacher',\r\n field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Teacher', verbose_name='Chemistry'),\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('examen', '0002_auto_20161122_1836'),
]
operations = [
migrations.RemoveField(
model_name='actuacionventa',
name='DPI',
),
migrations.RemoveField(
model_name='venta',
name='DPI',
),
migrations.AddField(
model_name='venta',
name='DPI',
field=models.ForeignKey(default=1, to='examen.Usuario'),
preserve_default=False,
),
]
|
normal
|
{
"blob_id": "5acbd6002c5e3cfac942d52b788f18c6afa92da2",
"index": 7028,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('examen', '0002_auto_20161122_1836')]\n operations = [migrations.RemoveField(model_name='actuacionventa', name=\n 'DPI'), migrations.RemoveField(model_name='venta', name='DPI'),\n migrations.AddField(model_name='venta', name='DPI', field=models.\n ForeignKey(default=1, to='examen.Usuario'), preserve_default=False)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('examen', '0002_auto_20161122_1836')]\n operations = [migrations.RemoveField(model_name='actuacionventa', name=\n 'DPI'), migrations.RemoveField(model_name='venta', name='DPI'),\n migrations.AddField(model_name='venta', name='DPI', field=models.\n ForeignKey(default=1, to='examen.Usuario'), preserve_default=False)]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('examen', '0002_auto_20161122_1836'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='actuacionventa',\n name='DPI',\n ),\n migrations.RemoveField(\n model_name='venta',\n name='DPI',\n ),\n migrations.AddField(\n model_name='venta',\n name='DPI',\n field=models.ForeignKey(default=1, to='examen.Usuario'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
from app.main.model.location import Location
from app.main.util.db_util import save_changes
key = 'a544aecdde85a1f52a56292f77ecde6e'
def save_location(ip_addr):
try:
existing_location = Location.query.filter_by(ip=ip_addr).first()
if existing_location:
location_data = existing_location.location
else:
location_data = get_location(ip_addr=ip_addr)
location = Location(ip=ip_addr, location=location_data)
save_changes(location)
except Exception as e:
if 'UNIQUE constraint failed: location.ip' not in str(e):
response_object = {'status': 'fail', 'message': e}
return response_object, 400
response_object = {'status': 'success', 'message':
'Successfully saved location.', 'location': location_data}
return response_object, 200
def get_location(ip_addr):
r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(
ip=ip_addr, key=key))
return r.text
|
normal
|
{
"blob_id": "eb8aec947cc1eeeb56b3884286b46ec7468dcc23",
"index": 9035,
"step-1": "<mask token>\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\ndef get_location(ip_addr):\n r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(\n ip=ip_addr, key=key))\n return r.text\n",
"step-3": "<mask token>\nkey = 'a544aecdde85a1f52a56292f77ecde6e'\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\ndef get_location(ip_addr):\n r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(\n ip=ip_addr, key=key))\n return r.text\n",
"step-4": "import requests\nfrom app.main.model.location import Location\nfrom app.main.util.db_util import save_changes\nkey = 'a544aecdde85a1f52a56292f77ecde6e'\n\n\ndef save_location(ip_addr):\n try:\n existing_location = Location.query.filter_by(ip=ip_addr).first()\n if existing_location:\n location_data = existing_location.location\n else:\n location_data = get_location(ip_addr=ip_addr)\n location = Location(ip=ip_addr, location=location_data)\n save_changes(location)\n except Exception as e:\n if 'UNIQUE constraint failed: location.ip' not in str(e):\n response_object = {'status': 'fail', 'message': e}\n return response_object, 400\n response_object = {'status': 'success', 'message':\n 'Successfully saved location.', 'location': location_data}\n return response_object, 200\n\n\ndef get_location(ip_addr):\n r = requests.get('http://api.ipstack.com/{ip}?access_key={key}'.format(\n ip=ip_addr, key=key))\n return r.text\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
class _ProtectedClass:
pass
class MyClass:
pass
class OtherClass(MyClass):
pass
def _protected_fun() -> MyClass:
return variable # noqa: F821
def my_fun() -> MyClass:
return variable # noqa: F821
def my_fun2() -> MyClass:
return variable # noqa: F821
variable: MyClass
variable_with_value: MyClass = MyClass()
__all__ = [ # noqa: F822
"OtherClass",
"my_fun2",
"variable",
]
|
normal
|
{
"blob_id": "b5949b40d731178bdbab776af8877921dcdfbf15",
"index": 3215,
"step-1": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\n<mask token>\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\n<mask token>\n",
"step-2": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\ndef my_fun() ->MyClass:\n return variable\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\n<mask token>\n",
"step-3": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\ndef my_fun() ->MyClass:\n return variable\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\nvariable: MyClass\nvariable_with_value: MyClass = MyClass()\n<mask token>\n",
"step-4": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\ndef my_fun() ->MyClass:\n return variable\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\nvariable: MyClass\nvariable_with_value: MyClass = MyClass()\n__all__ = ['OtherClass', 'my_fun2', 'variable']\n",
"step-5": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() -> MyClass:\n return variable # noqa: F821\n\n\ndef my_fun() -> MyClass:\n return variable # noqa: F821\n\n\ndef my_fun2() -> MyClass:\n return variable # noqa: F821\n\n\nvariable: MyClass\nvariable_with_value: MyClass = MyClass()\n\n\n__all__ = [ # noqa: F822\n \"OtherClass\",\n \"my_fun2\",\n \"variable\",\n]\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.shtiker.CogPageGlobals
COG_QUOTAS = ((30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10))
COG_UNSEEN = 1
COG_BATTLED = 2
COG_DEFEATED = 3
COG_COMPLETE1 = 4
COG_COMPLETE2 = 5
|
normal
|
{
"blob_id": "fdb680f12dfb4b29f25cfe4f7af80469dc4294cf",
"index": 2437,
"step-1": "<mask token>\n",
"step-2": "COG_QUOTAS = (30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10)\nCOG_UNSEEN = 1\nCOG_BATTLED = 2\nCOG_DEFEATED = 3\nCOG_COMPLETE1 = 4\nCOG_COMPLETE2 = 5\n",
"step-3": "# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.shtiker.CogPageGlobals\r\nCOG_QUOTAS = ((30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10))\r\nCOG_UNSEEN = 1\r\nCOG_BATTLED = 2\r\nCOG_DEFEATED = 3\r\nCOG_COMPLETE1 = 4\r\nCOG_COMPLETE2 = 5",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from math import sqrt
def prime_generator(n):
pp=[2,3]
for i in range(3,n):
i+=2
count=0
for ps in pp:
if ps>(sqrt(i)+1):
break
if i%ps==0:
count+=1
break
if count==0:
pp.append(i)
return pp
|
normal
|
{
"blob_id": "cfa064611a4aa16638bd649c68d64872b9fac1ff",
"index": 4647,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef prime_generator(n):\n pp = [2, 3]\n for i in range(3, n):\n i += 2\n count = 0\n for ps in pp:\n if ps > sqrt(i) + 1:\n break\n if i % ps == 0:\n count += 1\n break\n if count == 0:\n pp.append(i)\n return pp\n",
"step-3": "from math import sqrt\n\n\ndef prime_generator(n):\n pp = [2, 3]\n for i in range(3, n):\n i += 2\n count = 0\n for ps in pp:\n if ps > sqrt(i) + 1:\n break\n if i % ps == 0:\n count += 1\n break\n if count == 0:\n pp.append(i)\n return pp\n",
"step-4": "from math import sqrt\ndef prime_generator(n):\n pp=[2,3]\n for i in range(3,n):\n i+=2\n count=0\n for ps in pp:\n if ps>(sqrt(i)+1):\n break\n if i%ps==0:\n count+=1\n break \n if count==0:\n pp.append(i)\n return pp\n\n\n\n\n \n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <[email protected]>
#
# Distributed under terms of the MIT license.
"""
You are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.
Example 1:
Input: amount = 5, coins = [1, 2, 5]
Output: 4
Explanation: there are four ways to make up the amount:
5=5
5=2+2+1
5=2+1+1+1
5=1+1+1+1+1
Example 2:
Input: amount = 3, coins = [2]
Output: 0
Explanation: the amount of 3 cannot be made up just with coins of 2.
Example 3:
Input: amount = 10, coins = [10]
Output: 1
Note:
You can assume that
1. 0 <= amount <= 5000
2. 1 <= coin <= 5000
3. the number of coins is less than 500
4. the answer is guaranteed to fit into signed 32-bit integer
"""
import sys
from functools import lru_cache
from typing import List
import pytest
class Solution:
def change(self, amount: int, coins: List[int]) -> int:
coins = sorted(coins, reverse=True)
@lru_cache(None)
def rec(i, amount):
if i == len(coins):
return 1 if amount == 0 else 0
return sum(rec(i+1, amount-c) for c in range(0, amount+1, coins[i]))
return rec(0, amount)
@pytest.mark.parametrize('amount, coins, expected', [
(5, [1,2,5], 4),
(3, [2], 0),
(10, [10], 1),
])
def test(amount, coins, expected):
assert expected == Solution().change(amount, coins)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
normal
|
{
"blob_id": "332c530d221c9441d6ff3646f8e9226dc78067f9",
"index": 2902,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def change(self, amount: int, coins: List[int]) ->int:\n coins = sorted(coins, reverse=True)\n\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,\n coins[i]))\n return rec(0, amount)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def change(self, amount: int, coins: List[int]) ->int:\n coins = sorted(coins, reverse=True)\n\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,\n coins[i]))\n return rec(0, amount)\n\n\[email protected]('amount, coins, expected', [(5, [1, 2, 5], 4), (3,\n [2], 0), (10, [10], 1)])\ndef test(amount, coins, expected):\n assert expected == Solution().change(amount, coins)\n\n\nif __name__ == '__main__':\n sys.exit(pytest.main(['-s', '-v'] + sys.argv))\n",
"step-4": "<mask token>\nimport sys\nfrom functools import lru_cache\nfrom typing import List\nimport pytest\n\n\nclass Solution:\n\n def change(self, amount: int, coins: List[int]) ->int:\n coins = sorted(coins, reverse=True)\n\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i + 1, amount - c) for c in range(0, amount + 1,\n coins[i]))\n return rec(0, amount)\n\n\[email protected]('amount, coins, expected', [(5, [1, 2, 5], 4), (3,\n [2], 0), (10, [10], 1)])\ndef test(amount, coins, expected):\n assert expected == Solution().change(amount, coins)\n\n\nif __name__ == '__main__':\n sys.exit(pytest.main(['-s', '-v'] + sys.argv))\n",
"step-5": "\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2020 sungminoh <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nYou are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.\n\nExample 1:\n\nInput: amount = 5, coins = [1, 2, 5]\nOutput: 4\nExplanation: there are four ways to make up the amount:\n5=5\n5=2+2+1\n5=2+1+1+1\n5=1+1+1+1+1\n\nExample 2:\n\nInput: amount = 3, coins = [2]\nOutput: 0\nExplanation: the amount of 3 cannot be made up just with coins of 2.\n\nExample 3:\n\nInput: amount = 10, coins = [10]\nOutput: 1\n\nNote:\n\nYou can assume that\n 1. 0 <= amount <= 5000\n 2. 1 <= coin <= 5000\n 3. the number of coins is less than 500\n 4. the answer is guaranteed to fit into signed 32-bit integer\n\"\"\"\nimport sys\nfrom functools import lru_cache\nfrom typing import List\nimport pytest\n\n\nclass Solution:\n def change(self, amount: int, coins: List[int]) -> int:\n coins = sorted(coins, reverse=True)\n @lru_cache(None)\n def rec(i, amount):\n if i == len(coins):\n return 1 if amount == 0 else 0\n return sum(rec(i+1, amount-c) for c in range(0, amount+1, coins[i]))\n return rec(0, amount)\n\n\[email protected]('amount, coins, expected', [\n (5, [1,2,5], 4),\n (3, [2], 0),\n (10, [10], 1),\n])\ndef test(amount, coins, expected):\n assert expected == Solution().change(amount, coins)\n\n\nif __name__ == '__main__':\n sys.exit(pytest.main([\"-s\", \"-v\"] + sys.argv))\n\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
import random
print(random.choice(['python', 'c++', 'java']))
print(random.choice((1.1, -5, 6, 4, 7)))
|
normal
|
{
"blob_id": "44f18d7e7713073c27fec38f0b847803eceefbc9",
"index": 2687,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(random.choice(['python', 'c++', 'java']))\nprint(random.choice((1.1, -5, 6, 4, 7)))\n",
"step-3": "import random\nprint(random.choice(['python', 'c++', 'java']))\nprint(random.choice((1.1, -5, 6, 4, 7)))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib as mpl
from matplotlib import pyplot as plt
plt.style.use('seaborn-muted')
#from IPython import get_ipython
from IPython.display import HTML, Markdown
import air_cargo_problems as acp
problems = ['Air Cargo Problem 1',
'Air Cargo Problem 2',
'Air Cargo Problem 3',
'Air Cargo Problem 4']
SEARCHES = ['breadth_first_search',
'depth_first_graph_search',
'uniform_cost_search',
'greedy_best_first_graph_search h_unmet_goals',
'greedy_best_first_graph_search h_pg_levelsum',
'greedy_best_first_graph_search h_pg_maxlevel',
'greedy_best_first_graph_search h_pg_setlevel',
'astar_search h_unmet_goals',
'astar_search h_pg_levelsum',
'astar_search h_pg_maxlevel',
'astar_search h_pg_setlevel']
def get_prob_specs():
Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(),
acp.air_cargo_p3(), acp.air_cargo_p4()]
problems_specs = {'Problem': [name for name in problems],
'Air cargo problem': [i+1 for i in range(len(problems))],
'Cargos': [len(p.cargos) for p in Probs],
'Planes': [len(p.planes) for p in Probs],
'Airports': [len(p.airports) for p in Probs],
'Goal': [len(p.goal) for p in Probs]}
return pd.DataFrame(problems_specs)
specs = get_prob_specs()
def df2tsv(df, fname, replace=False):
if Path(fname).exists():
if replace:
df.to_csv(fname, sep='\t')
#else:
# print(f'File {fname} not replaced.')
return
df.to_csv(fname, sep='\t')
return
def get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=False, replace=False):
"""
Combine all processed files of a problem found in Path(data_dir) with given stem.
    If file_as_tsv is True, the combined frame is saved to/retrieved from out_dir as a tab-separated csv.
Input example:
file_stem = 'prob_2'
problem = 'Air Cargo Problem 2'
Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.
"""
if file_stem is None or problem is None:
print('file_stem and problem must have a value.')
return
t = '\t'
# input/output file suffixes:
sfx = ['.csv', '_df.csv']
# Try retrieving it from out_dir if not replacing it:
fout = None
if file_as_tsv:
fout = Path(out_dir).joinpath(file_stem + sfx[1])
if fout.exists() and not replace:
df = pd.read_csv(fout, sep=t)
try:
return df.drop('Unnamed: 0', axis=1)
except KeyError:
pass
# else: (re)process
pfiles = list(Path(raw_dir).glob(file_stem + '*'))
if len(pfiles) == 0:
print(f'No raw files with stem: {file_stem}')
return
dflist = []
for f in pfiles:
df, err = get_results_df(f, problem)
if df is not None:
df = df.merge(specs)
df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)
df['index'] = df['index'].astype(int)
df.set_index('index', drop=True, inplace=True)
dflist.append(df)
del df
else:
print(f'Error from get_results_df:\n\t{err}')
dfout = pd.concat(dflist, ignore_index=False)
dfout.sort_index(inplace=True)
if file_as_tsv:
df2tsv(dfout, fout, replace=replace)
return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
# Cols to add:
val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
# Rename cols: c (temp) -> Searcher
df.columns = ['c', 'Searcher']
# Add new cols & reindex
df = df.reindex(columns = df.columns.tolist() + val_cols)
# Populate new cols according to row with search name:
sr = df.loc[df.c == 'Searcher', 'Searcher']
for (idx, sr_row) in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
# Add a minute column:
df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)
# Replace values of 1st col with problem name & update col name:
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def concat_all_dfs(dflist):
"""
Output combined df for complete runs, Actions>0.
"""
dfall = pd.concat(dflist, ignore_index=False)
dfall.reset_index(drop=False, inplace=True)
dfall.rename(columns={'index': 'id'}, inplace=True)
# reduced
drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']
dfa = dfall.drop(drop_cols, axis=1)
del dfall
# add col for function name
dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
# reorder cols
dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',
'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]
# complete runs only:
return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
"""
dfa: frame of concatenated df1 to df4.
Analysis of plan length for which in ['double', 'single']:
PlanLength is double(single)-digit.
"""
if which == 'double':
msk = dfa.PlanLength >= 10
col2 = 'Frequency where PlanLength >=10'
else:
msk = dfa.PlanLength < 10
col2 = 'Frequency where PlanLength <10'
dfa_rows = dfa.shape[0]
dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
uniq_probs = dfout['Air cargo problem'].unique()
n_plans = dfout.shape[0]
searcher_cnt = dfout['Searcher'].value_counts()
fn_cnt = dfout['search_fn'].value_counts()
# get the html string:
df_fn = fn_cnt.to_frame()
df_fn.reset_index(drop=False, inplace=True)
df_fn.columns = ['Search function', col2]
df_fn_html = df_fn.to_html(index=False, justify='center')
replace_str1 = ' style="text-align: center;"'
replace_str2 = 'class="dataframe"'
df_fn_html = df_fn_html.replace(replace_str1, '')
df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
pct_plans = n_plans/dfa_rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn/n_plans
text = f"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>"
text += f"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`."
if len(uniq_probs) < 4:
text += " And this occurs only for Problems: "
pro = ",".join('{}' for p in uniq_probs) +'.<br>'
text += pro.format(*uniq_probs)
else:
text += " And this occurs for all Problems."
text += "<br>"
return df_fn_html, text, dfout
def make_bar_plots(df_list,
x_col, y_col,
problems,
legend_bbox=(.05, .95),
to_file='',
show=False,
excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1>0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2>0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
# Seach functions names should be common to all dfs:
search = df_list[0].Searcher.tolist()
# Sample cmap according to categories:
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i*m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
# Use the minutes columns for the more complex problems:
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
        if p1 == 3 or p2 == 4: # applies to problems 3/4
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',
y = 1.05, fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
# log scale on NewNodes for df2, df3, df4:
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog,
color=colors,
legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(axs[i])
legt = 'Searchers'
new_lgd = p1 == 3 and excluded is not None
if new_lgd:
# Modify the legend to indicate excluded searches
# (bc colormap is identical to fig1/2, but some runs have no data).
legt += ' (X :: excluded)'
excluded_len = len(excluded)
x_idx = [excluded[i][0]-1 for i in range(excluded_len)]
legend_patches = []
for i, c in enumerate(colors):
lab = search[i]
if new_lgd:
if SEARCHES.index(lab) in x_idx:
lab = lab.replace(' ', ' + ')
lab += ' X'
else:
lab = lab.replace(' ', ' + ')
else:
lab = lab.replace(' ', ' + ')
legend_patches.append(mpatches.Patch(color=c, label=lab))
axs[1].legend(handles=legend_patches,
title=legt,
title_fontsize='14',
fontsize='medium',
bbox_to_anchor=legend_bbox,
loc='upper left',
labelspacing=0.6,
fancybox=True)
plt.tight_layout()
if to_file:
plt.savefig(to_file)
if show:
return axs
def format_multiples(multi):
s = ''
for i in range(len(multi)):
s += '{'+ str(i) +':s}, '
s = s[:-2]
return '[' + s.format(*multi.values) + ']'
def order_analysis(df2, df1, column_to_compare):
"""
df2: has the large values.
"""
colA_larger_values = df2[column_to_compare]
colA_smaller_values = df1[column_to_compare]
# orders of magnitude difference btw dfB and dfA (min, max):
mag = np.round(np.log(colA_larger_values/colA_smaller_values), 0)
mag.sort_values(ascending=False, inplace=True)
mag_aver = int(np.round(mag.mean(), 0))
# get the indices of values above average:
ma = mag[mag > mag_aver].index.tolist()
# get the names of all searchers corresponding to the ma:
above_multiples = (mag_aver, df2.loc[ma, 'Searcher'])
return above_multiples
def comparison_paragraph(df2, df1, heading, column_to_compare, return_html=False):
p1 = df1.loc[0,'Problem'][-1]
p2 = df2.loc[0,'Problem'][-1]
order_aver, searches_above = order_analysis(df2, df1, column_to_compare)
above = format_multiples(searches_above)
headinglc = heading.lower()
text = f"""<h3>* {heading}</h3><p style="font-size:110%;">For Problems {p1} and {p2}, """
text += f"the <i>average</i> order of magnitude difference in {headinglc} is "
text += f"<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>"
if return_html:
return text
else:
return Markdown(text)
def get_elim_candidates(df2, df1):
"""
For the analysis of problems 1 & 2.
List the costliest searches: candidates for elimination on more complex problems.
"""
    if df1.loc[1, 'Problem'] != problems[0]:
return
nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')
time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')
elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))
# return their 1-base index also:
out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]
return out
def paragraph_p12(candidates_tup, return_html=False):
"""
For displaying the analysis of problems 1 & 2.
"""
elim_list = ""
for i, c in candidates_tup:
elim_list += f"<dt><b>{i:>2}: {c}</b></dt>"
text = """<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">"""
text += """On the basis of Figures 1 and 2, which show the number of new nodes created,
and the time spent by each search function, respectively, the searches that are candidates
for elimination for more complex problems are those at the intersection of the average-ranked
costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
text += f"<dl>{elim_list}</dl></p></pre>"
if return_html:
return text
else:
return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style="{width: 80%}"):
"""
Wrap an html code str inside a div.
div_style: whatever follows style= within the <div>
Behaviour with `output_string=True`:
The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')
The only thing to do is change the cell mode to Markdown.
If `output_string=False`, the HTML/md output is displayed in an output cell.
"""
div = f"""<div style="{div_style}">{div_html_text}</div>"""
if output_string:
return div
#get_ipython().set_next_input(div, 'markdown')
else:
return Markdown(div)
|
normal
|
{
"blob_id": "cd49230be3c418853aa2986ed727204e51a6b6ae",
"index": 3794,
"step-1": "<mask token>\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\n<mask token>\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = 
y_col\n plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\n<mask token>\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\n<mask token>\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-2": "<mask token>\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\n<mask token>\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 
'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\n<mask token>\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. 
\n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-3": "<mask token>\nplt.style.use('seaborn-muted')\n<mask token>\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\n<mask token>\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa 
= dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{' + str(i) + ':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. 
\n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nplt.style.use('seaborn-muted')\nfrom IPython.display import HTML, Markdown\nimport air_cargo_problems as acp\nproblems = ['Air Cargo Problem 1', 'Air Cargo Problem 2',\n 'Air Cargo Problem 3', 'Air Cargo Problem 4']\nSEARCHES = ['breadth_first_search', 'depth_first_graph_search',\n 'uniform_cost_search', 'greedy_best_first_graph_search h_unmet_goals',\n 'greedy_best_first_graph_search h_pg_levelsum',\n 'greedy_best_first_graph_search h_pg_maxlevel',\n 'greedy_best_first_graph_search h_pg_setlevel',\n 'astar_search h_unmet_goals', 'astar_search h_pg_levelsum',\n 'astar_search h_pg_maxlevel', 'astar_search h_pg_setlevel']\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\nspecs = get_prob_specs()\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 
'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{' + str(i) + ':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. 
\n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-5": "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nplt.style.use('seaborn-muted')\n\n#from IPython import get_ipython\nfrom IPython.display import HTML, Markdown\n\nimport air_cargo_problems as acp\n\n\nproblems = ['Air Cargo Problem 1', \n 'Air Cargo Problem 2',\n 'Air Cargo Problem 3',\n 'Air Cargo Problem 4']\n\nSEARCHES = ['breadth_first_search',\n 'depth_first_graph_search',\n 'uniform_cost_search',\n 'greedy_best_first_graph_search h_unmet_goals',\n 'greedy_best_first_graph_search h_pg_levelsum',\n 'greedy_best_first_graph_search h_pg_maxlevel',\n 'greedy_best_first_graph_search h_pg_setlevel',\n 'astar_search h_unmet_goals',\n 'astar_search h_pg_levelsum',\n 'astar_search h_pg_maxlevel',\n 'astar_search h_pg_setlevel']\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(),\n acp.air_cargo_p3(), acp.air_cargo_p4()]\n\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [i+1 for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs],\n 'Planes': [len(p.planes) for p in Probs],\n 'Airports': [len(p.airports) for p in Probs],\n 'Goal': [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\nspecs = get_prob_specs()\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n #else:\n # print(f'File {fname} not replaced.')\n return\n \n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n \n t = '\\t'\n \n # input/output file suffixes:\n sfx = ['.csv', '_df.csv']\n \n # Try retrieving it from out_dir if not replacing it:\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n # else: (re)process\n \n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n \n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n \n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n \n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n \n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n \n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n \n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n \n # Cols to add:\n val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n \n # Rename cols: c (temp) -> 
Searcher\n df.columns = ['c', 'Searcher']\n # Add new cols & reindex\n df = df.reindex(columns = df.columns.tolist() + val_cols)\n \n # Populate new cols according to row with search name:\n sr = df.loc[df.c == 'Searcher', 'Searcher'] \n for (idx, sr_row) in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n\n df.dropna(inplace=True)\n # Add a minute column:\n df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)\n \n # Replace values of 1st col with problem name & update col name:\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n \n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n # reduced\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n # add col for function name\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n # reorder cols\n dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',\n 'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]\n\n # complete runs only:\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n \n dfa_rows = dfa.shape[0]\n \n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n\n # get the html string:\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n \n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n\n pct_plans = n_plans/dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn/n_plans\n\n text = f\"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>\"\n text += f\"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.\"\n if len(uniq_probs) < 4:\n text += \" And this occurs only for Problems: \"\n pro = \",\".join('{}' for p in uniq_probs) +'.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += \" And this occurs for all Problems.\"\n text += \"<br>\"\n \n return df_fn_html, text, dfout\n\ndef make_bar_plots(df_list,\n x_col, y_col,\n problems,\n legend_bbox=(.05, .95),\n to_file='',\n show=False,\n excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\" \n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1>0]\n a2 = 
df_list[1][x_col].unique().astype(int)\n a2 = a2[a2>0]\n assert len(a1) == len(a2) == 1\n \n action_nums = [a1[0], a2[0]]\n \n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n \n # Seach functions names should be common to all dfs:\n search = df_list[0].Searcher.tolist()\n \n # Sample cmap according to categories:\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i*m] for i in range(s_len)]\n \n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))\n \n # Use the minutes columns for the more complex problems:\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4: # applies to problems 3/4\n y_col = 'Minutes'\n else:\n ty_col = y_col\n \n plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',\n y = 1.05, fontsize=14)\n\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n # log scale on NewNodes for df2, df3, df4:\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n \n axs[i].set_ylabel(ylab, fontsize=12)\n\n df[y_col].plot.bar(ax=axs[i], logy=ylog,\n color=colors,\n legend=False)\n \n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n # Modify the legend to indicate excluded searches\n # (bc colormap is identical to fig1/2, but some runs have no data).\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [excluded[i][0]-1 for i in range(excluded_len)]\n \n legend_patches = [] \n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n \n axs[1].legend(handles=legend_patches,\n title=legt,\n title_fontsize='14',\n fontsize='medium', \n bbox_to_anchor=legend_bbox, \n loc='upper left',\n labelspacing=0.6,\n fancybox=True)\n\n plt.tight_layout()\n \n if to_file:\n plt.savefig(to_file)\n \n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{'+ str(i) +':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n\n # orders of magnitude difference btw dfB and dfA (min, max):\n mag = np.round(np.log(colA_larger_values/colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n\n # get the indices of values above average:\n ma = mag[mag > mag_aver].index.tolist()\n \n # get the names of all searchers corresponding to the ma:\n above_multiples = (mag_aver, df2.loc[ma, 'Searcher'])\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=False):\n\n p1 = df1.loc[0,'Problem'][-1]\n p2 = df2.loc[0,'Problem'][-1]\n \n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n \n headinglc = heading.lower()\n text = f\"\"\"<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, \"\"\"\n text += f\"the <i>average</i> order of magnitude difference in {headinglc} is \"\n text += 
f\"<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>\"\n\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. \n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1,'Problem']!= problems[0]:\n return\n \n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))\n # return their 1-base index also:\n out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]\n return out\n\n \ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n\n elim_list = \"\"\n for i, c in candidates_tup:\n elim_list += f\"<dt><b>{i:>2}: {c}</b></dt>\"\n \n text = \"\"\"<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">\"\"\"\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f\"<dl>{elim_list}</dl></p></pre>\"\n \n if return_html:\n return text\n else:\n return Markdown(text) \n\n \ndef add_div_around_html(div_html_text, output_string=False, div_style=\"{width: 80%}\"):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f\"\"\"<div style=\"{div_style}\">{div_html_text}</div>\"\"\"\n if output_string:\n return div\n #get_ipython().set_next_input(div, 'markdown')\n else:\n return Markdown(div)",
"step-ids": [
6,
12,
14,
16,
17
]
}
|
[
6,
12,
14,
16,
17
] |
"""
Simple neural network using pytorch
"""
import torch
import torch.nn as nn
# Prepare the data
# X represents the amount of hours studied and how much time students spent sleeping
X = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float) # 3 X 2 tensor
# y represent grades.
y = torch.tensor(([92], [100], [89]), dtype=torch.float) # 3 X 1 tensor
# xPredicted is a single input for which we want to predict a grade using
# the parameters learned by the neural network.
xPredicted = torch.tensor(([4, 8]), dtype=torch.float) # 1 X 2 tensor
# Scale units
breakpoint()
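# built-in breakpoint() drops into the debugger here so the raw tensors can be inspected before scaling; remove it to run the script non-interactively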
X_max, index1 = torch.max(X, 0)
xPredicted_max, index2 = torch.max(xPredicted, 0)
X = torch.div(X, X_max)
xPredicted = torch.div(xPredicted, xPredicted_max)
y = y / 100 # max test score is 100
print("X_max:", X_max)
print("xPredicted_max:", xPredicted_max)
print("X:", X)
print("y:", y)
print("xPredicted:", xPredicted)
class Neural_Network(nn.Module):
"""Neural network class"""
def __init__(self, input_size=2, output_size=1, hidden_size=3):
super(Neural_Network, self).__init__()
# parameters
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
# weights
        self.W1 = torch.randn(self.input_size, self.hidden_size) # 2 X 3 tensor
self.W2 = torch.randn(self.hidden_size, self.output_size) # 3 X 1 tensor
def forward(self, X):
"""forward calculation"""
self.z = torch.matmul(X, self.W1) # 3 X 3 ".dot" does not broadcast in PyTorch
self.z2 = self.sigmoid(self.z) # activation function
self.z3 = torch.matmul(self.z2, self.W2)
o = self.sigmoid(self.z3) # final activation function
return o
def backward(self, X, y, o):
"""backward calculation"""
self.o_error = y - o # error in output
self.o_delta = self.o_error * self.sigmoid_prime(o) # derivative of sig to error
self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))
self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)
self.W1 += torch.matmul(torch.t(X), self.z2_delta)
self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)
def sigmoid(self, s):
"""calculate sigmoid"""
return 1 / (1 + torch.exp(-s))
def sigmoid_prime(self, s):
"""calculate derivative of sigmoid"""
return s * (1 - s)
def train(self, X, y):
# forward + backward pass for training
o = self.forward(X)
self.backward(X, y, o)
def save_weights(self, model):
# we will use the PyTorch internal storage functions
torch.save(model, "NN")
# you can reload model with all the weights and so forth with:
# torch.load("NN")
def predict(self):
"""predict"""
# @TODO: should be passed in as argument
print ("Predicted data based on trained weights: ")
print ("Input (scaled): \n" + str(xPredicted))
print ("Output: \n" + str(self.forward(xPredicted)))
NN = Neural_Network()
epoch = 1000
for i in range(epoch): # trains the NN epoch times
#print ("#" + str(i) + " Loss: " + str(torch.mean((y - NN(X))**2).detach().item())) # mean sum squared loss
NN.train(X, y)
NN.save_weights(NN)
NN.predict()
|
normal
|
{
"blob_id": "2d5e7c57f58f189e8d0c7d703c1672ea3586e4ac",
"index": 6771,
"step-1": "<mask token>\n\n\nclass Neural_Network(nn.Module):\n <mask token>\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\n<mask token>\n",
"step-3": "<mask token>\nX = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float)\ny = torch.tensor(([92], [100], [89]), dtype=torch.float)\nxPredicted = torch.tensor([4, 8], dtype=torch.float)\nbreakpoint()\nX_max, index1 = torch.max(X, 0)\nxPredicted_max, index2 = torch.max(xPredicted, 0)\nX = torch.div(X, X_max)\nxPredicted = torch.div(xPredicted, xPredicted_max)\ny = y / 100\nprint('X_max:', X_max)\nprint('xPredicted_max:', xPredicted_max)\nprint('X:', X)\nprint('y:', y)\nprint('xPredicted:', xPredicted)\n\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\nNN = Neural_Network()\nepoch = 1000\nfor i in range(epoch):\n NN.train(X, y)\nNN.save_weights(NN)\nNN.predict()\n",
"step-4": "<mask token>\nimport torch\nimport torch.nn as nn\nX = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float)\ny = torch.tensor(([92], [100], [89]), dtype=torch.float)\nxPredicted = torch.tensor([4, 8], dtype=torch.float)\nbreakpoint()\nX_max, index1 = torch.max(X, 0)\nxPredicted_max, index2 = torch.max(xPredicted, 0)\nX = torch.div(X, X_max)\nxPredicted = torch.div(xPredicted, xPredicted_max)\ny = y / 100\nprint('X_max:', X_max)\nprint('xPredicted_max:', xPredicted_max)\nprint('X:', X)\nprint('y:', y)\nprint('xPredicted:', xPredicted)\n\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\nNN = Neural_Network()\nepoch = 1000\nfor i in range(epoch):\n NN.train(X, y)\nNN.save_weights(NN)\nNN.predict()\n",
"step-5": "\"\"\"\nSimple neural network using pytorch\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n# Prepare the data\n\n# X represents the amount of hours studied and how much time students spent sleeping\nX = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float) # 3 X 2 tensor\n# y represent grades. \ny = torch.tensor(([92], [100], [89]), dtype=torch.float) # 3 X 1 tensor\n# xPredicted is a single input for which we want to predict a grade using \n# the parameters learned by the neural network.\nxPredicted = torch.tensor(([4, 8]), dtype=torch.float) # 1 X 2 tensor\n\n# Scale units\nbreakpoint()\nX_max, index1 = torch.max(X, 0)\nxPredicted_max, index2 = torch.max(xPredicted, 0)\n\nX = torch.div(X, X_max)\nxPredicted = torch.div(xPredicted, xPredicted_max)\ny = y / 100 # max test score is 100\n\nprint(\"X_max:\", X_max)\nprint(\"xPredicted_max:\", xPredicted_max)\nprint(\"X:\", X)\nprint(\"y:\", y)\nprint(\"xPredicted:\", xPredicted)\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n # parameters\n\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n \n # weights\n self.W1 = torch.randn(self.input_size, self.hidden_size) # 3 X 2 tensor\n self.W2 = torch.randn(self.hidden_size, self.output_size) # 3 X 1 tensor\n \n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1) # 3 X 3 \".dot\" does not broadcast in PyTorch\n self.z2 = self.sigmoid(self.z) # activation function\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3) # final activation function\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o # error in output\n self.o_delta = self.o_error * self.sigmoid_prime(o) # derivative of sig to error\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n \n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n \n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n \n def train(self, X, y):\n # forward + backward pass for training\n o = self.forward(X)\n self.backward(X, y, o)\n \n def save_weights(self, model):\n # we will use the PyTorch internal storage functions\n torch.save(model, \"NN\")\n # you can reload model with all the weights and so forth with:\n # torch.load(\"NN\")\n \n def predict(self):\n \"\"\"predict\"\"\"\n # @TODO: should be passed in as argument\n print (\"Predicted data based on trained weights: \")\n print (\"Input (scaled): \\n\" + str(xPredicted))\n print (\"Output: \\n\" + str(self.forward(xPredicted)))\n \n\nNN = Neural_Network()\nepoch = 1000\nfor i in range(epoch): # trains the NN epoch times\n #print (\"#\" + str(i) + \" Loss: \" + str(torch.mean((y - NN(X))**2).detach().item())) # mean sum squared loss\n NN.train(X, y)\nNN.save_weights(NN)\nNN.predict()",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
#!/usr/bin/env python
import socket
import json
import threading
import sys
from db_util import DBUtil
from cryptoLib import AesCtr,Hmac
class Client(threading.Thread):
def __init__(self, (client_conn, client_addr), sema):
threading.Thread.__init__(self)
self.client_conn = client_conn
self.client_addr = client_addr
self.size = 4096
self.len_of_mac = 12
self.sema = sema
def run(self):
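		# Receive a payload, verify its HMAC, decrypt it, and write the resulting JSON record to the database.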
while True:
dataRaw = None
try:
dataRaw = self.client_conn.recv(self.size)
iv,dataEnc,dataHmac=dataRaw.split("nonce")
				dataAuth=self.verHmac(dataEnc,dataHmac)
if not dataAuth:
continue
else:
					dataChecked=self.decrypt(dataEnc,iv)
except socket.error, e:
print(e.message)
if dataRaw is not None:
try:
data = json.loads(dataChecked)
print("Received : " + str(data))
dbutil = DBUtil()
self.sema.acquire()
dbutil.update_database(data)
self.sema.release()
except ValueError:
continue
self.client_conn.close()
break
	def verHmac(self, dataEnc, dataHmac):
		"""Check the received HMAC against the encrypted payload; return True if it verifies."""
		hmObj1 = Hmac(dataEnc)
		return hmObj1.verifyHmac(dataHmac)

	def decrypt(self, dataEnc, iv):
		"""Decrypt the AES-CTR payload with the received nonce and return the plaintext."""
		e2 = AesCtr()
		return e2.decryptData(dataEnc, iv)
class Receiver:
def __init__(self,port):
self.host ="127.0.0.1"
		# bind to the loopback address only
self.port = port
self.threads = list()
self.udp_sock = None
self.semaphore = threading.Semaphore(1)
def get_ip_address(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def create_socket(self):
try:
			# accept()/recv() on a per-connection socket requires a stream (TCP) socket;
			# a SOCK_DGRAM socket cannot accept connections.
			self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
			self.udp_sock.bind((self.host, self.port))
			self.udp_sock.listen(2)
except socket.error:
if self.udp_sock:
self.udp_sock.close()
print("Failure to open socket")
sys.exit(1)
def run(self):
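		# Accept incoming connections and hand each one off to a Client worker thread.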
self.create_socket()
while True:
client = Client(self.udp_sock.accept(), self.semaphore)
client.start()
self.threads.append(client)
def main():
receiver = Receiver(49999)
receiver.run()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "1338d6578a94338c6e75acc025ddddd14097ee10",
"index": 2044,
"step-1": "#!/usr/bin python\n\nimport socket\nimport json\nimport threading\nimport sys\nfrom db_util import DBUtil\nfrom cryptoLib import AesCtr,Hmac\n\n\nclass Client(threading.Thread):\n\tdef __init__(self, (client_conn, client_addr), sema):\n\t\tthreading.Thread.__init__(self)\n\t\tself.client_conn = client_conn\n\t\tself.client_addr = client_addr\n\t\tself.size = 4096\n\t\tself.len_of_mac = 12\n\t\tself.sema = sema\n\n\n\tdef run(self):\n\t\twhile True:\n\t\t\tdataRaw = None\n\t\t\ttry:\n\t\t\t\tdataRaw = self.client_conn.recv(self.size)\n\t\t\t\tiv,dataEnc,dataHmac=dataRaw.split(\"nonce\")\n\t\t\t\tdataAuth=verHmac(dataEnc,dataHmac)\n\t\t\t\tif not dataAuth:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tdataChecked=decrypt(dataEnc,iv)\n\t\t\texcept socket.error, e:\n\t\t\t\tprint(e.message)\n\n\t\t\tif dataRaw is not None:\n\t\t\t\ttry:\n\t\t\t\t\tdata = json.loads(dataChecked)\n\t\t\t\t\tprint(\"Received : \" + str(data))\n\t\t\t\t\tdbutil = DBUtil()\n\t\t\t\t\tself.sema.acquire()\n\t\t\t\t\tdbutil.update_database(data)\n\t\t\t\t\tself.sema.release()\n\t\t\t\texcept ValueError:\n\t\t\t\t\tcontinue\n\n\t\t\tself.client_conn.close()\n\t\t\tbreak\n\tdef verHmac(dataHmac,dataEnc):\n\t\thmObj1=Hmac(dataEnc)\n\t\tl=hmObj1.verifyHmac(dataHmac)\n\t\treturn l\n\t\tdef decrypt(dataEnc,iv):\n\t\te2=AesCtr()\n\t\tunEnc=e2.decryptData(enc,iv)\n\n\n\nclass Receiver:\n\tdef __init__(self,port):\n\t\tself.host =\"127.0.0.1\"\n\t\t#why not \"127.0.0.1\"\n\t\tself.port = port\n\t\tself.threads = list()\n\t\tself.udp_sock = None\n\t\tself.semaphore = threading.Semaphore(1)\n\n\tdef get_ip_address(self):\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\ts.connect((\"8.8.8.8\", 80))\n\t\treturn s.getsockname()[0]\n\n\n\tdef create_socket(self):\n\t\ttry:\n\t\t\tself.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\tself.udp_sock.bind((self.host, self.port))\n\t\t\t#self.udp_sock.listen(2)\n\t\texcept socket.error:\n\t\t\tif self.udp_sock:\n\t\t\t\tself.udp_sock.close()\n\t\t\tprint(\"Failure to open socket\")\n\t\t\tsys.exit(1)\n\n\tdef run(self):\n\t\tself.create_socket()\n\t\twhile True:\n\t\t\tclient = Client(self.udp_sock.accept(), self.semaphore)\n\t\t\tclient.start()\n\t\t\tself.threads.append(client)\n\ndef main():\n\treceiver = Receiver(49999)\n\treceiver.run()\n\nif __name__ == '__main__':\n\tmain()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# pylama:ignore=E221,E251
from setuptools import find_packages, setup
setup(
name = 'coding_exercises',
version = '1.0',
description = 'Coding Exercises in Python',
author = 'Gustavo Gama',
author_email = '[email protected]',
url = 'https://gama.igenesis.com.br',
packages = find_packages()
)
|
normal
|
{
"blob_id": "5f4abc7e9397034737ee214b0d0aae39ebf1548b",
"index": 8098,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='coding_exercises', version='1.0', description=\n 'Coding Exercises in Python', author='Gustavo Gama', author_email=\n '[email protected]', url='https://gama.igenesis.com.br', packages=\n find_packages())\n",
"step-3": "from setuptools import find_packages, setup\nsetup(name='coding_exercises', version='1.0', description=\n 'Coding Exercises in Python', author='Gustavo Gama', author_email=\n '[email protected]', url='https://gama.igenesis.com.br', packages=\n find_packages())\n",
"step-4": "#!/usr/bin/env python\n# pylama:ignore=E221,E251\n\nfrom setuptools import find_packages, setup\n\nsetup(\n name = 'coding_exercises',\n version = '1.0',\n description = 'Coding Exercises in Python',\n author = 'Gustavo Gama',\n author_email = '[email protected]',\n url = 'https://gama.igenesis.com.br',\n packages = find_packages()\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import urllib.request
import json
def kind():
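    # Return the top-level "kind" field parsed from dataset.json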
data={}
with open("dataset.json", "r") as read_file:
data = json.load(read_file)
return data["kind"]
def items():
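    # Return the top-level "items" field parsed from dataset.json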
data={}
with open("dataset.json", "r") as read_file:
data = json.load(read_file)
return data["items"]
#Can add a bunch of other things after referring to data
|
normal
|
{
"blob_id": "630480e9458491a26ea9060bd36541a0d5805a11",
"index": 647,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef kind():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['kind']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kind():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['kind']\n\n\ndef items():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['items']\n",
"step-4": "import urllib.request\nimport json\n\n\ndef kind():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['kind']\n\n\ndef items():\n data = {}\n with open('dataset.json', 'r') as read_file:\n data = json.load(read_file)\n return data['items']\n",
"step-5": "import urllib.request\nimport json\n\ndef kind():\n data={}\n with open(\"dataset.json\", \"r\") as read_file:\n data = json.load(read_file)\n return data[\"kind\"]\n\ndef items():\n data={}\n with open(\"dataset.json\", \"r\") as read_file:\n data = json.load(read_file)\n return data[\"items\"]\n\n#Can add a bunch of other things after refering to data\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Helpers for FormatCBFMiniPilatus..."""
from __future__ import annotations
import calendar
import time
def get_pilatus_timestamp(timestamp_string):
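    # Convert a Pilatus-style timestamp string to seconds since the epoch, keeping any fractional part.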
if "." in timestamp_string:
timestamp, milliseconds = timestamp_string.split(".")
else:
timestamp = timestamp_string
milliseconds = "000"
for format in ["%Y-%b-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S", "%Y/%b/%d %H:%M:%S"]:
try:
struct_time = time.strptime(timestamp, format)
return calendar.timegm(struct_time) + float("0." + milliseconds)
except Exception:
pass
raise RuntimeError("timestamp %s not recognised" % timestamp)
|
normal
|
{
"blob_id": "21526dabe8456c599e4409228fa69ffd0d672c5b",
"index": 4689,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_pilatus_timestamp(timestamp_string):\n if '.' in timestamp_string:\n timestamp, milliseconds = timestamp_string.split('.')\n else:\n timestamp = timestamp_string\n milliseconds = '000'\n for format in ['%Y-%b-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S',\n '%Y/%b/%d %H:%M:%S']:\n try:\n struct_time = time.strptime(timestamp, format)\n return calendar.timegm(struct_time) + float('0.' + milliseconds)\n except Exception:\n pass\n raise RuntimeError('timestamp %s not recognised' % timestamp)\n",
"step-3": "<mask token>\nfrom __future__ import annotations\nimport calendar\nimport time\n\n\ndef get_pilatus_timestamp(timestamp_string):\n if '.' in timestamp_string:\n timestamp, milliseconds = timestamp_string.split('.')\n else:\n timestamp = timestamp_string\n milliseconds = '000'\n for format in ['%Y-%b-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S',\n '%Y/%b/%d %H:%M:%S']:\n try:\n struct_time = time.strptime(timestamp, format)\n return calendar.timegm(struct_time) + float('0.' + milliseconds)\n except Exception:\n pass\n raise RuntimeError('timestamp %s not recognised' % timestamp)\n",
"step-4": "\"\"\"Helpers for FormatCBFMiniPilatus...\"\"\"\n\n\nfrom __future__ import annotations\n\nimport calendar\nimport time\n\n\ndef get_pilatus_timestamp(timestamp_string):\n if \".\" in timestamp_string:\n timestamp, milliseconds = timestamp_string.split(\".\")\n else:\n timestamp = timestamp_string\n milliseconds = \"000\"\n\n for format in [\"%Y-%b-%dT%H:%M:%S\", \"%Y-%m-%dT%H:%M:%S\", \"%Y/%b/%d %H:%M:%S\"]:\n\n try:\n struct_time = time.strptime(timestamp, format)\n return calendar.timegm(struct_time) + float(\"0.\" + milliseconds)\n\n except Exception:\n pass\n\n raise RuntimeError(\"timestamp %s not recognised\" % timestamp)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Handprint module for handling credentials.
Authors
-------
Michael Hucka <[email protected]> -- Caltech Library
Copyright
---------
Copyright (c) 2018-2022 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from .base import Credentials
from .amazon_auth import AmazonCredentials
from .google_auth import GoogleCredentials
from .microsoft_auth import MicrosoftCredentials
|
normal
|
{
"blob_id": "7e29220752b4a52be34cdf0c734695d1052d0414",
"index": 9309,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom .base import Credentials\nfrom .amazon_auth import AmazonCredentials\nfrom .google_auth import GoogleCredentials\nfrom .microsoft_auth import MicrosoftCredentials\n",
"step-3": "'''\nHandprint module for handling credentials.\n\nAuthors\n-------\n\nMichael Hucka <[email protected]> -- Caltech Library\n\nCopyright\n---------\n\nCopyright (c) 2018-2022 by the California Institute of Technology. This code\nis open-source software released under a 3-clause BSD license. Please see the\nfile \"LICENSE\" for more information.\n'''\n\nfrom .base import Credentials\nfrom .amazon_auth import AmazonCredentials\nfrom .google_auth import GoogleCredentials\nfrom .microsoft_auth import MicrosoftCredentials\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''
3. Write a function: when the input n is even, call it to compute 1/2 + 1/4 + ... + 1/n; when n is odd, call it to compute 1/1 + 1/3 + ... + 1/n.
'''
def f(n):
if n%2==0:
sum=0
for x in range(2,n+1,2):
sum+=1/x
print(sum)
if n%2!=0:
sum=0
for x in range(1,n+1,2):
sum+=1/x
print(sum)
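# Example calls (added for illustration; not part of the original exercise):
if __name__ == '__main__':
    f(4)  # prints 0.75 (1/2 + 1/4)
    f(5)  # prints ~1.5333 (1/1 + 1/3 + 1/5)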
|
normal
|
{
"blob_id": "69cf28d32e6543271a0855d61a76808b03c06891",
"index": 4805,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(n):\n if n % 2 == 0:\n sum = 0\n for x in range(2, n + 1, 2):\n sum += 1 / x\n print(sum)\n if n % 2 != 0:\n sum = 0\n for x in range(1, n + 1, 2):\n sum += 1 / x\n print(sum)\n",
"step-3": "'''\n3、\t编写一个函数,输入n为偶数时,调用函数求1/2+1/4+...+1/n,当输入n为奇数时,调用函数1/1+1/3+...+1/n\n'''\n\ndef f(n):\n if n%2==0:\n sum=0\n for x in range(2,n+1,2):\n sum+=1/x\n print(sum)\n if n%2!=0:\n sum=0\n for x in range(1,n+1,2):\n sum+=1/x\n print(sum)\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf-8
# 02. 「パトカー」+「タクシー」=「パタトクカシーー」
# Take the characters of 「パトカー」 and 「タクシー」 alternately from the beginning and concatenate them to obtain the string 「パタトクカシーー」.
s1 = "パトカー"
s2 = "タクシー"
ans = ""
for c1, c2 in zip(s1, s2):
ans += c1 + c2
print(ans)
# パタトクカシーー (expected output)
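# Equivalent one-liner (an alternative sketch, not from the original solution):
ans2 = ''.join(c1 + c2 for c1, c2 in zip(s1, s2))
assert ans2 == ans  # both build 'パタトクカシーー'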
|
normal
|
{
"blob_id": "4d7e30714ae209e1d09d895dadf7a19928fe253f",
"index": 6623,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\nprint(ans)\n",
"step-3": "s1 = 'パトカー'\ns2 = 'タクシー'\nans = ''\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\nprint(ans)\n",
"step-4": "# coding: utf-8\n\n# 02. 「パトカー」+「タクシー」=「パタトクカシーー」\n# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.\n\ns1 = \"パトカー\"\ns2 = \"タクシー\"\n\nans = \"\"\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\n\nprint(ans)\n#パタトクカシーー\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import division, print_function, absolute_import
import numbers
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from .base import check_frame
from skutil.base import overrides
from sklearn.externals import six
from sklearn.base import _pprint
from sklearn.utils.fixes import signature, bincount
from sklearn.utils import check_random_state
from math import ceil, floor
try:
from h2o import H2OEstimator
except ImportError:
from h2o.estimators.estimator_base import H2OEstimator
try:
from sklearn.model_selection import KFold
SK18 = True
except ImportError:
from sklearn.cross_validation import KFold
SK18 = False
__all__ = [
'check_cv',
'h2o_train_test_split',
'H2OKFold',
'H2OShuffleSplit',
'H2OStratifiedKFold',
'H2OStratifiedShuffleSplit'
]
def _build_repr(self):
# XXX This is copied from sklearn.BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_signature = signature(init)
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
def check_cv(cv=3):
"""Checks the ``cv`` parameter to determine
whether it's a valid int or H2OBaseCrossValidator.
Parameters
----------
cv : int or H2OBaseCrossValidator, optional (default=3)
The number of folds or the H2OBaseCrossValidator
instance.
Returns
-------
cv : H2OBaseCrossValidator
The instance of H2OBaseCrossValidator
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
return H2OKFold(cv)
if not isinstance(cv, H2OBaseCrossValidator):
raise ValueError('expected int or instance of '
'H2OBaseCrossValidator but got %s'
% type(cv))
return cv
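# Hedged usage sketch (added for illustration; not part of the original module):
#   check_cv(5)            # -> an H2OKFold with 5 folds
#   check_cv(H2OKFold(3))  # -> the same instance, returned unchanged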
def h2o_train_test_split(frame, test_size=None, train_size=None, random_state=None, stratify=None):
"""Splits an H2OFrame into random train and test subsets
Parameters
----------
frame : H2OFrame
The h2o frame to split
test_size : float, int, or None (default=None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25
train_size : float, int, or None (default=None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : str or None (default=None)
The name of the target on which to stratify the sampling
Returns
-------
out : tuple, shape=(2,)
training_frame : H2OFrame
The training fold split
testing_frame : H2OFrame
The testing fold split
"""
frame = check_frame(frame, copy=False)
if test_size is None and train_size is None:
test_size = 0.25
if stratify is not None:
CVClass = H2OStratifiedShuffleSplit
else:
CVClass = H2OShuffleSplit
cv = CVClass(n_splits=2,
test_size=test_size,
train_size=train_size,
random_state=random_state)
# for the h2o one, we only need iter 0
tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]
# h2o "doesn't reorder rows" so we need to keep these sorted...
train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))
out = (
frame[train, :],
frame[test, :]
)
return out
# Avoid a problem with nosetests...
h2o_train_test_split.__test__ = False
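# Hedged usage sketch (assumes a running h2o cluster and an existing H2OFrame
# `df` with a 'target' column; these names are illustrative, not from this module):
#   train, test = h2o_train_test_split(df, test_size=0.25, random_state=42)
#   train, test = h2o_train_test_split(df, test_size=0.25, stratify='target')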
def _val_y(y):
if isinstance(y, six.string_types):
return str(y)
elif y is None:
return y
raise TypeError('y must be a string. Got %s' % y)
class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):
"""Base class for H2O cross validation operations.
All implementing subclasses should override ``get_n_splits``
and ``_iter_test_indices``.
"""
def __init__(self):
pass
def split(self, frame, y=None):
"""Generate indices to split data into training and test.
Parameters
----------
frame : ``H2OFrame``
The h2o frame to split
y : str, optional (default=None)
The name of the column to stratify, if applicable.
Returns
-------
train : ndarray
The training set indices for the split
test : ndarray
The testing set indices for that split
"""
frame = check_frame(frame, copy=False)
indices = np.arange(frame.shape[0])
for test_index in self._iter_test_masks(frame, y):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
# h2o can't handle anything but lists...
yield list(train_index), list(test_index)
def _iter_test_masks(self, frame, y=None):
"""Generates boolean masks corresponding to the tests set.
Parameters
----------
frame : H2OFrame
The h2o frame to split
y : string, optional (default=None)
The column to stratify.
Returns
-------
test_mask : np.ndarray, shape=(n_samples,)
The indices for the test split
"""
for test_index in self._iter_test_indices(frame, y):
test_mask = np.zeros(frame.shape[0], dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, frame, y=None):
raise NotImplementedError('this method must be implemented by a subclass')
@abstractmethod
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the cross validator.
"""
pass
def __repr__(self):
return _build_repr(self)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
raise ValueError('Invalid value for test_size: %r' % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError(
'train_size=%f should be smaller '
                    'than 1.0 or be an integer' % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
                raise ValueError('The sum of test_size and train_size = %f '
'should be smaller than 1.0. Reduce test_size '
'and/or train_size.' % (train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
raise ValueError('Invalid value for train_size: %r' % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
if test_size is not None and np.asarray(test_size).dtype.kind == 'i' and test_size >= n_samples:
raise ValueError('test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n_samples))
if train_size is not None and np.asarray(train_size).dtype.kind == 'i' and train_size >= n_samples:
raise ValueError('train_size=%d should be smaller '
'than the number of samples %d' % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size=%d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
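# Worked example (added for clarity): with n_samples=100, test_size=0.25 and
# train_size=None, n_test = ceil(0.25 * 100) = 25 and n_train = 100 - 25 = 75,
# so _validate_shuffle_split(100, 0.25, None) returns (75, 25).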
class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):
"""Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This
is used for ``h2o_train_test_split`` in strategic train/test splits of
H2OFrames. Implementing subclasses should override ``_iter_indices``.
Parameters
----------
n_splits : int, optional (default=2)
The number of folds or splits in the split
test_size : float or int, optional (default=0.1)
The ratio of observations for the test fold
train_size : float or int, optional (default=None)
The ratio of observations for the train fold
random_state : int or RandomState, optional (default=None)
The random state for duplicative purposes.
"""
def __init__(self, n_splits=2, test_size=0.1, train_size=None, random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
for train, test in self._iter_indices(frame, y):
yield train, test
@abstractmethod
def _iter_indices(self, frame, y):
"""Abstract method for iterating the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
pass
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the shuffle split.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class H2OShuffleSplit(H2OBaseShuffleSplit):
"""Default shuffle splitter used for ``h2o_train_test_split``.
This shuffle split class will not perform any stratification, and
will simply shuffle indices and split into the number of specified
sub-frames.
"""
def _iter_indices(self, frame, y=None):
"""Iterate the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify. Since this class does
not perform stratification, ``y`` is unused.
Returns
-------
ind_train : np.ndarray, shape=(n_samples,)
The train indices
ind_test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
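# Worked example (added for clarity): with a 10-row frame and test_size=0.3,
# _validate_shuffle_split gives n_test=3 and n_train=7, so every split yields
# 7 shuffled train indices and 3 shuffled test indices.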
class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):
"""Shuffle splitter used for ``h2o_train_test_split`` when stratified
option is specified. This shuffle split class will perform stratification.
"""
def _iter_indices(self, frame, y):
"""Iterate the indices with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
Returns
-------
train : np.ndarray, shape=(n_samples,)
The train indices
test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples,
self.test_size, self.train_size)
# need to validate y...
y = _val_y(y)
target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist())
classes, y_indices = np.unique(target, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError('The least populated class in y has only 1 '
'member, which is too few. The minimum number of labels '
'for any class cannot be less than 2.')
if n_train < n_classes:
raise ValueError('The train_size=%d should be greater than or '
'equal to the number of classes=%d' % (n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size=%d should be greater than or '
'equal to the number of classes=%d' % (n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int))
for _ in range(self.n_splits):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((target == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
# Might end up here with less samples in train and test than we asked
# for, due to rounding errors.
if len(train) + len(test) < n_train + n_test:
missing_indices = np.where(bincount(train + test, minlength=len(target)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedShuffleSplit, self).split(frame, y)
class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):
"""Base class for KFold and Stratified KFold.
Parameters
----------
n_folds : int
The number of splits
shuffle : bool
Whether to shuffle indices
random_state : int or RandomState
The random state for the split
"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError('n_folds must be of Integral type. '
'%s of type %s was passed' % (n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError('k-fold cross-validation requires at least one '
'train/test split by setting n_folds=2 or more')
if shuffle not in [True, False]:
raise TypeError('shuffle must be True or False. Got %s (type=%s)'
% (str(shuffle), type(shuffle)))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
@overrides(H2OBaseCrossValidator)
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
frame = check_frame(frame, copy=False)
n_obs = frame.shape[0]
if self.n_folds > n_obs:
raise ValueError('Cannot have n_folds greater than n_obs')
for train, test in super(_H2OBaseKFold, self).split(frame, y):
yield train, test
@overrides(H2OBaseCrossValidator)
def get_n_splits(self):
"""Get the number of splits or folds.
Returns
-------
n_folds : int
The number of folds
"""
return self.n_folds
class H2OKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OKFold, self).__init__(n_folds, shuffle, random_state)
@overrides(_H2OBaseKFold)
def _iter_test_indices(self, frame, y=None):
n_obs = frame.shape[0]
indices = np.arange(n_obs)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_obs % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
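# Worked example (added for clarity): with n_obs=10 and n_folds=3 the fold
# sizes come out as [4, 3, 3] (10 // 3 == 3, plus one extra for the first
# 10 % 3 == 1 fold), so the test index slices are [0:4], [4:7] and [7:10].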
class H2OStratifiedKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame with
stratified splits.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state)
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedKFold, self).split(frame, y)
def _iter_test_masks(self, frame, y):
test_folds = self._make_test_folds(frame, y)
for i in range(self.n_folds):
yield test_folds == i
def _make_test_folds(self, frame, y):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# validate that it's a string
y = _val_y(y) # gets a string back or None
if y is None:
raise ValueError('H2OStratifiedKFold requires a target name (got None)')
target = frame[y].as_data_frame(use_pandas=True)[y].values
n_samples = target.shape[0]
unique_y, y_inversed = np.unique(target, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError(('All the n_labels for individual classes'
' are less than %d folds.'
% self.n_folds), Warning)
if self.n_folds > min_labels:
warnings.warn(('The least populated class in y has only %d'
' members, which is too few. The minimum'
' number of labels for any class cannot'
' be less than n_folds=%d.'
% (min_labels, self.n_folds)), Warning)
# NOTE FROM SKLEARN:
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_folds)) as data to the KFold.
# Remember, however that we might be using the old-fold KFold which doesn't
# have a split method...
if SK18:
per_cls_cvs = [
KFold(self.n_folds, # using sklearn's KFold here
shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds)))
for count in y_counts
]
else:
per_cls_cvs = [
KFold(max(count, self.n_folds), # using sklearn's KFold here
self.n_folds,
shuffle=self.shuffle,
random_state=rng)
for count in y_counts
]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[target == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_folds)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[target == cls] = cls_test_folds
return test_folds
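# Hedged usage sketch (assumes a running h2o cluster; `df` and 'target' are
# illustrative names, not from the original module):
#   cv = H2OStratifiedKFold(n_folds=5, shuffle=True, random_state=0)
#   for train_idx, test_idx in cv.split(df, 'target'):
#       train_fold, test_fold = df[train_idx, :], df[test_idx, :]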
|
normal
|
{
"blob_id": "c59707ba07c1659d94684c54cdd7bb2658cba935",
"index": 6,
"step-1": "<mask token>\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n <mask token>\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. 
The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
"step-2": "<mask token>\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. 
This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
"step-3": "<mask token>\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\n<mask token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\n<mask token>\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n if test_size is not None and np.asarray(test_size\n ).dtype.kind == 'i' and test_size >= n_samples:\n raise ValueError(\n 'test_size=%d should be smaller than the number of samples %d' %\n (test_size, n_samples))\n if train_size is not None and np.asarray(train_size\n ).dtype.kind == 'i' and train_size >= n_samples:\n raise ValueError(\n 'train_size=%d should be smaller than the number of samples %d' %\n (train_size, n_samples))\n if np.asarray(test_size).dtype.kind == 'f':\n n_test = ceil(test_size * n_samples)\n elif np.asarray(test_size).dtype.kind == 'i':\n n_test = float(test_size)\n if train_size is None:\n n_train = n_samples - n_test\n elif np.asarray(train_size).dtype.kind == 'f':\n n_train = floor(train_size * n_samples)\n else:\n n_train = float(train_size)\n if test_size is None:\n n_test = n_samples - n_train\n if n_train + n_test > 
n_samples:\n raise ValueError(\n 'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'\n % (n_train + n_test, n_samples))\n return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. 
This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. 
Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. 
The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
"step-4": "<mask token>\n\n\ndef _build_repr(self):\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_signature = signature(init)\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values() if\n p.name != 'self' and p.kind != p.VAR_KEYWORD])\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter('always', DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError(\n 'expected int or instance of H2OBaseCrossValidator but got %s' %\n type(cv))\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None,\n random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,\n random_state=random_state)\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = frame[train, :], frame[test, :]\n return out\n\n\n<mask token>\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. 
Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError(\n 'this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\ndef _validate_shuffle_split_init(test_size, train_size):\n \"\"\"Validation helper to check the test_size and train_size at init\"\"\"\n if test_size is None and train_size is None:\n raise ValueError('test_size and train_size can not both be None')\n if test_size is not None:\n if np.asarray(test_size).dtype.kind == 'f':\n if test_size >= 1.0:\n raise ValueError(\n 'test_size=%f should be smaller than 1.0 or be an integer'\n % test_size)\n elif np.asarray(test_size).dtype.kind != 'i':\n raise ValueError('Invalid value for test_size: %r' % test_size)\n if train_size is not None:\n if np.asarray(train_size).dtype.kind == 'f':\n if train_size >= 1.0:\n raise ValueError(\n 'train_size=%f should be smaller than 1.0 or be an integer'\n % test_size)\n elif np.asarray(test_size\n ).dtype.kind == 'f' and train_size + test_size > 1.0:\n raise ValueError(\n 'The sum of test_size and train_size = %fshould be smaller than 1.0. 
Reduce test_size and/or train_size.'\n % (train_size + test_size))\n elif np.asarray(train_size).dtype.kind != 'i':\n raise ValueError('Invalid value for train_size: %r' % train_size)\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n if test_size is not None and np.asarray(test_size\n ).dtype.kind == 'i' and test_size >= n_samples:\n raise ValueError(\n 'test_size=%d should be smaller than the number of samples %d' %\n (test_size, n_samples))\n if train_size is not None and np.asarray(train_size\n ).dtype.kind == 'i' and train_size >= n_samples:\n raise ValueError(\n 'train_size=%d should be smaller than the number of samples %d' %\n (train_size, n_samples))\n if np.asarray(test_size).dtype.kind == 'f':\n n_test = ceil(test_size * n_samples)\n elif np.asarray(test_size).dtype.kind == 'i':\n n_test = float(test_size)\n if train_size is None:\n n_train = n_samples - n_test\n elif np.asarray(train_size).dtype.kind == 'f':\n n_train = floor(train_size * n_samples)\n else:\n n_train = float(train_size)\n if test_size is None:\n n_test = n_samples - n_train\n if n_train + n_test > n_samples:\n raise ValueError(\n 'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'\n % (n_train + n_test, n_samples))\n return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. \n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None,\n random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. 
Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:n_test + n_train]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,\n self.train_size)\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()\n )\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError(\n 'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'\n )\n if n_train < n_classes:\n raise ValueError(\n 'The train_size=%d should be greater than or equal to the number of classes=%d'\n % (n_train, n_classes))\n if n_test < n_classes:\n raise ValueError(\n 'The test_size=%d should be greater than or equal to the number of classes=%d'\n % (n_test, n_classes))\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)\n )\n for _ in range(self.n_splits):\n train = []\n test = []\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where(target == class_i)[0][\n permutation]\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength\n =len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n train = rng.permutation(train)\n test = rng.permutation(test)\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n 
random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError(\n 'n_folds must be of Integral type. %s of type %s was passed' %\n (n_folds, type(n_folds)))\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError(\n 'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'\n )\n if shuffle not in [True, False]:\n raise TypeError(\n 'shuffle must be True or False. Got %s (type=%s)' % (str(\n shuffle), type(shuffle)))\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n n_folds = self.n_folds\n fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state\n )\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n y = _val_y(y)\n if y is None:\n raise ValueError(\n 'H2OStratifiedKFold requires a target name (got None)')\n target = 
frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n if np.all(self.n_folds > y_counts):\n raise ValueError(\n 'All the n_labels for individual classes are less than %d folds.'\n % self.n_folds, Warning)\n if self.n_folds > min_labels:\n warnings.warn(\n 'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'\n % (min_labels, self.n_folds), Warning)\n if SK18:\n per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds))) for\n count in y_counts]\n else:\n per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,\n shuffle=self.shuffle, random_state=rng) for count in y_counts]\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n return test_folds\n",
"step-5": "from __future__ import division, print_function, absolute_import\nimport numbers\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom .base import check_frame\nfrom skutil.base import overrides\nfrom sklearn.externals import six\nfrom sklearn.base import _pprint\nfrom sklearn.utils.fixes import signature, bincount\nfrom sklearn.utils import check_random_state\nfrom math import ceil, floor\n\ntry:\n from h2o import H2OEstimator\nexcept ImportError:\n from h2o.estimators.estimator_base import H2OEstimator\n\ntry:\n from sklearn.model_selection import KFold\n SK18 = True\nexcept ImportError:\n from sklearn.cross_validation import KFold\n SK18 = False\n\n__all__ = [\n 'check_cv',\n 'h2o_train_test_split',\n 'H2OKFold',\n 'H2OShuffleSplit',\n 'H2OStratifiedKFold',\n 'H2OStratifiedShuffleSplit'\n]\n\n\ndef _build_repr(self):\n # XXX This is copied from sklearn.BaseEstimator's get_params\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n\n init_signature = signature(init)\n\n if init is object.__init__:\n args = []\n else:\n args = sorted([p.name for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD])\n\n class_name = self.__class__.__name__\n params = dict()\n for key in args:\n warnings.simplefilter(\"always\", DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n continue\n finally:\n warnings.filters.pop(0)\n params[key] = value\n\n return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))\n\n\ndef check_cv(cv=3):\n \"\"\"Checks the ``cv`` parameter to determine\n whether it's a valid int or H2OBaseCrossValidator.\n\n Parameters\n ----------\n\n cv : int or H2OBaseCrossValidator, optional (default=3)\n The number of folds or the H2OBaseCrossValidator\n instance.\n\n Returns\n -------\n\n cv : H2OBaseCrossValidator\n The instance of H2OBaseCrossValidator\n \"\"\"\n if cv is None:\n cv = 3\n\n if isinstance(cv, numbers.Integral):\n return H2OKFold(cv)\n\n if not isinstance(cv, H2OBaseCrossValidator):\n raise ValueError('expected int or instance of '\n 'H2OBaseCrossValidator but got %s'\n % type(cv))\n\n return cv\n\n\ndef h2o_train_test_split(frame, test_size=None, train_size=None, random_state=None, stratify=None):\n \"\"\"Splits an H2OFrame into random train and test subsets\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n test_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the test split. If\n int, represents the absolute number of test samples. If None,\n the value is automatically set to the complement of the train size.\n If train size is also None, test size is set to 0.25\n\n train_size : float, int, or None (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the absolute number of train samples. 
If None,\n the value is automatically set to the complement of the test size.\n\n random_state : int or RandomState\n Pseudo-random number generator state used for random sampling.\n\n stratify : str or None (default=None)\n The name of the target on which to stratify the sampling\n\n Returns\n -------\n\n out : tuple, shape=(2,)\n training_frame : H2OFrame\n The training fold split\n\n testing_frame : H2OFrame\n The testing fold split\n \"\"\"\n frame = check_frame(frame, copy=False)\n if test_size is None and train_size is None:\n test_size = 0.25\n\n if stratify is not None:\n CVClass = H2OStratifiedShuffleSplit\n else:\n CVClass = H2OShuffleSplit\n\n cv = CVClass(n_splits=2,\n test_size=test_size,\n train_size=train_size,\n random_state=random_state)\n\n # for the h2o one, we only need iter 0\n tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]\n\n # h2o \"doesn't reorder rows\" so we need to keep these sorted...\n train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))\n out = (\n frame[train, :],\n frame[test, :]\n )\n\n return out\n\n\n# Avoid a pb with nosetests...\nh2o_train_test_split.__test__ = False\n\n\ndef _val_y(y):\n if isinstance(y, six.string_types):\n return str(y)\n elif y is None:\n return y\n raise TypeError('y must be a string. Got %s' % y)\n\n\nclass H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2O cross validation operations.\n All implementing subclasses should override ``get_n_splits``\n and ``_iter_test_indices``.\n \"\"\"\n\n def __init__(self):\n pass\n\n def split(self, frame, y=None):\n \"\"\"Generate indices to split data into training and test.\n\n Parameters\n ----------\n\n frame : ``H2OFrame``\n The h2o frame to split\n\n y : str, optional (default=None)\n The name of the column to stratify, if applicable.\n\n Returns\n -------\n\n train : ndarray\n The training set indices for the split\n\n test : ndarray\n The testing set indices for that split\n \"\"\"\n\n frame = check_frame(frame, copy=False)\n indices = np.arange(frame.shape[0])\n for test_index in self._iter_test_masks(frame, y):\n train_index = indices[np.logical_not(test_index)]\n test_index = indices[test_index]\n\n # h2o can't handle anything but lists...\n yield list(train_index), list(test_index)\n\n def _iter_test_masks(self, frame, y=None):\n \"\"\"Generates boolean masks corresponding to the tests set.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The h2o frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n\n Returns\n -------\n\n test_mask : np.ndarray, shape=(n_samples,)\n The indices for the test split\n \"\"\"\n for test_index in self._iter_test_indices(frame, y):\n test_mask = np.zeros(frame.shape[0], dtype=np.bool)\n test_mask[test_index] = True\n yield test_mask\n\n def _iter_test_indices(self, frame, y=None):\n raise NotImplementedError('this method must be implemented by a subclass')\n\n @abstractmethod\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the cross validator.\n \"\"\"\n pass\n\n def __repr__(self):\n return _build_repr(self)\n\n\ndef _validate_shuffle_split_init(test_size, train_size):\n \"\"\"Validation helper to check the test_size and train_size at init\"\"\"\n if test_size is None and train_size is None:\n raise ValueError('test_size and train_size can not both be None')\n\n if test_size is not None:\n if np.asarray(test_size).dtype.kind == 'f':\n if test_size >= 1.:\n raise ValueError(\n 'test_size=%f should be smaller 
'\n 'than 1.0 or be an integer' % test_size)\n elif np.asarray(test_size).dtype.kind != 'i':\n raise ValueError('Invalid value for test_size: %r' % test_size)\n\n if train_size is not None:\n if np.asarray(train_size).dtype.kind == 'f':\n if train_size >= 1.:\n raise ValueError(\n 'train_size=%f should be smaller '\n 'than 1.0 or be an integer' % test_size)\n elif (np.asarray(test_size).dtype.kind == 'f' and\n (train_size + test_size) > 1.):\n raise ValueError('The sum of test_size and train_size = %f'\n 'should be smaller than 1.0. Reduce test_size '\n 'and/or train_size.' % (train_size + test_size))\n elif np.asarray(train_size).dtype.kind != 'i':\n raise ValueError('Invalid value for train_size: %r' % train_size)\n\n\ndef _validate_shuffle_split(n_samples, test_size, train_size):\n if test_size is not None and np.asarray(test_size).dtype.kind == 'i' and test_size >= n_samples:\n raise ValueError('test_size=%d should be smaller '\n 'than the number of samples %d' % (test_size, n_samples))\n\n if train_size is not None and np.asarray(train_size).dtype.kind == 'i' and train_size >= n_samples:\n raise ValueError('train_size=%d should be smaller '\n 'than the number of samples %d' % (train_size, n_samples))\n\n if np.asarray(test_size).dtype.kind == 'f':\n n_test = ceil(test_size * n_samples)\n elif np.asarray(test_size).dtype.kind == 'i':\n n_test = float(test_size)\n\n if train_size is None:\n n_train = n_samples - n_test\n elif np.asarray(train_size).dtype.kind == 'f':\n n_train = floor(train_size * n_samples)\n else:\n n_train = float(train_size)\n\n if test_size is None:\n n_test = n_samples - n_train\n\n if n_train + n_test > n_samples:\n raise ValueError('The sum of train_size and test_size=%d, '\n 'should be smaller than the number of '\n 'samples %d. Reduce test_size and/or '\n 'train_size.' % (n_train + n_test, n_samples))\n\n return int(n_train), int(n_test)\n\n\nclass H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This\n is used for ``h2o_train_test_split`` in strategic train/test splits of\n H2OFrames. Implementing subclasses should override ``_iter_indices``.\n\n Parameters\n ----------\n\n n_splits : int, optional (default=2)\n The number of folds or splits in the split\n\n test_size : float or int, optional (default=0.1)\n The ratio of observations for the test fold\n\n train_size : float or int, optional (default=None)\n The ratio of observations for the train fold \n\n random_state : int or RandomState, optional (default=None)\n The random state for duplicative purposes. 
\n \"\"\"\n\n def __init__(self, n_splits=2, test_size=0.1, train_size=None, random_state=None):\n _validate_shuffle_split_init(test_size, train_size)\n self.n_splits = n_splits\n self.test_size = test_size\n self.train_size = train_size\n self.random_state = random_state\n\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n for train, test in self._iter_indices(frame, y):\n yield train, test\n\n @abstractmethod\n def _iter_indices(self, frame, y):\n \"\"\"Abstract method for iterating the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n pass\n\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds for\n this instance of the shuffle split.\n \"\"\"\n return self.n_splits\n\n def __repr__(self):\n return _build_repr(self)\n\n\nclass H2OShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Default shuffle splitter used for ``h2o_train_test_split``.\n This shuffle split class will not perform any stratification, and\n will simply shuffle indices and split into the number of specified\n sub-frames.\n \"\"\"\n\n def _iter_indices(self, frame, y=None):\n \"\"\"Iterate the indices.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify. Since this class does\n not perform stratification, ``y`` is unused.\n\n Returns\n -------\n\n ind_train : np.ndarray, shape=(n_samples,)\n The train indices\n\n ind_test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size)\n\n rng = check_random_state(self.random_state)\n for i in range(self.n_splits):\n permutation = rng.permutation(n_samples)\n ind_test = permutation[:n_test]\n ind_train = permutation[n_test:(n_test + n_train)]\n yield ind_train, ind_test\n\n\nclass H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):\n \"\"\"Shuffle splitter used for ``h2o_train_test_split`` when stratified\n option is specified. This shuffle split class will perform stratification.\n \"\"\"\n\n def _iter_indices(self, frame, y):\n \"\"\"Iterate the indices with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n\n Returns\n -------\n\n train : np.ndarray, shape=(n_samples,)\n The train indices\n\n test : np.ndarray, shape=(n_samples,)\n The test indices\n \"\"\"\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples,\n self.test_size, self.train_size)\n\n # need to validate y...\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist())\n\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError('The least populated class in y has only 1 '\n 'member, which is too few. 
The minimum number of labels '\n 'for any class cannot be less than 2.')\n\n if n_train < n_classes:\n raise ValueError('The train_size=%d should be greater than or '\n 'equal to the number of classes=%d' % (n_train, n_classes))\n\n if n_test < n_classes:\n raise ValueError('The test_size=%d should be greater than or '\n 'equal to the number of classes=%d' % (n_test, n_classes))\n\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int))\n\n for _ in range(self.n_splits):\n train = []\n test = []\n\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where((target == class_i))[0][permutation]\n\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n\n # Might end up here with less samples in train and test than we asked\n # for, due to rounding errors.\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength=len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n\n train = rng.permutation(train)\n test = rng.permutation(test)\n\n yield train, test\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedShuffleSplit, self).split(frame, y)\n\n\nclass _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):\n \"\"\"Base class for KFold and Stratified KFold.\n \n Parameters\n ----------\n\n n_folds : int\n The number of splits\n\n shuffle : bool\n Whether to shuffle indices\n\n random_state : int or RandomState\n The random state for the split\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_folds, shuffle, random_state):\n if not isinstance(n_folds, numbers.Integral):\n raise ValueError('n_folds must be of Integral type. '\n '%s of type %s was passed' % (n_folds, type(n_folds)))\n\n n_folds = int(n_folds)\n if n_folds <= 1:\n raise ValueError('k-fold cross-validation requires at least one '\n 'train/test split by setting n_folds=2 or more')\n\n if shuffle not in [True, False]:\n raise TypeError('shuffle must be True or False. 
Got %s (type=%s)'\n % (str(shuffle), type(shuffle)))\n\n self.n_folds = n_folds\n self.shuffle = shuffle\n self.random_state = random_state\n\n @overrides(H2OBaseCrossValidator)\n def split(self, frame, y=None):\n \"\"\"Split the frame.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string, optional (default=None)\n The column to stratify.\n \"\"\"\n frame = check_frame(frame, copy=False)\n n_obs = frame.shape[0]\n\n if self.n_folds > n_obs:\n raise ValueError('Cannot have n_folds greater than n_obs')\n\n for train, test in super(_H2OBaseKFold, self).split(frame, y):\n yield train, test\n\n @overrides(H2OBaseCrossValidator)\n def get_n_splits(self):\n \"\"\"Get the number of splits or folds.\n\n Returns\n -------\n\n n_folds : int\n The number of folds\n \"\"\"\n return self.n_folds\n\n\nclass H2OKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OKFold, self).__init__(n_folds, shuffle, random_state)\n\n @overrides(_H2OBaseKFold)\n def _iter_test_indices(self, frame, y=None):\n n_obs = frame.shape[0]\n indices = np.arange(n_obs)\n if self.shuffle:\n check_random_state(self.random_state).shuffle(indices)\n\n n_folds = self.n_folds\n fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=np.int)\n fold_sizes[:n_obs % n_folds] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n yield indices[start:stop]\n current = stop\n\n\nclass H2OStratifiedKFold(_H2OBaseKFold):\n \"\"\"K-folds cross-validator for an H2OFrame with\n stratified splits.\n \n Parameters\n ----------\n\n n_folds : int, optional (default=3)\n The number of splits\n\n shuffle : bool, optional (default=False)\n Whether to shuffle indices\n\n random_state : int or RandomState, optional (default=None)\n The random state for the split\n \"\"\"\n def __init__(self, n_folds=3, shuffle=False, random_state=None):\n super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state)\n\n def split(self, frame, y):\n \"\"\"Split the frame with stratification.\n\n Parameters\n ----------\n\n frame : H2OFrame\n The frame to split\n\n y : string\n The column to stratify.\n \"\"\"\n return super(H2OStratifiedKFold, self).split(frame, y)\n\n def _iter_test_masks(self, frame, y):\n test_folds = self._make_test_folds(frame, y)\n for i in range(self.n_folds):\n yield test_folds == i\n\n def _make_test_folds(self, frame, y):\n if self.shuffle:\n rng = check_random_state(self.random_state)\n else:\n rng = self.random_state\n\n # validate that it's a string\n y = _val_y(y) # gets a string back or None\n if y is None:\n raise ValueError('H2OStratifiedKFold requires a target name (got None)')\n\n target = frame[y].as_data_frame(use_pandas=True)[y].values\n n_samples = target.shape[0]\n unique_y, y_inversed = np.unique(target, return_inverse=True)\n y_counts = bincount(y_inversed)\n min_labels = np.min(y_counts)\n\n if np.all(self.n_folds > y_counts):\n raise ValueError(('All the n_labels for individual classes'\n ' are less than %d folds.'\n % self.n_folds), Warning)\n if self.n_folds > min_labels:\n warnings.warn(('The least populated class in y has only %d'\n ' members, which is too few. 
The minimum'\n ' number of labels for any class cannot'\n ' be less than n_folds=%d.'\n % (min_labels, self.n_folds)), Warning)\n\n # NOTE FROM SKLEARN:\n\n # pre-assign each sample to a test fold index using individual KFold\n # splitting strategies for each class so as to respect the balance of\n # classes\n # NOTE: Passing the data corresponding to ith class say X[y==class_i]\n # will break when the data is not 100% stratifiable for all classes.\n # So we pass np.zeroes(max(c, n_folds)) as data to the KFold.\n\n # Remember, however that we might be using the old-fold KFold which doesn't\n # have a split method...\n if SK18:\n per_cls_cvs = [\n KFold(self.n_folds, # using sklearn's KFold here\n shuffle=self.shuffle,\n random_state=rng).split(np.zeros(max(count, self.n_folds)))\n for count in y_counts\n ]\n else:\n per_cls_cvs = [\n KFold(max(count, self.n_folds), # using sklearn's KFold here\n self.n_folds,\n shuffle=self.shuffle,\n random_state=rng)\n for count in y_counts\n ]\n\n test_folds = np.zeros(n_samples, dtype=np.int)\n for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):\n for cls, (_, test_split) in zip(unique_y, per_cls_splits):\n cls_test_folds = test_folds[target == cls]\n\n # the test split can be too big because we used\n # KFold(...).split(X[:max(c, n_folds)]) when data is not 100%\n # stratifiable for all the classes\n # (we use a warning instead of raising an exception)\n # If this is the case, let's trim it:\n test_split = test_split[test_split < len(cls_test_folds)]\n cls_test_folds[test_split] = test_fold_indices\n test_folds[target == cls] = cls_test_folds\n\n return test_folds\n",
"step-ids": [
21,
29,
40,
43,
47
]
}
|
[
21,
29,
40,
43,
47
] |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
import os
from datetime import datetime
from sklearn.linear_model import LinearRegression, SGDRegressor
import sys
import time
import imp
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor, plot_importance
from sklearn.model_selection import train_test_split
import lightgbm as lgb
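# Descriptive overview (added comment): this script cleans daily item-count/sales records,
# engineers simple date features, and evaluates several regressors
# (linear regression, extra trees, LightGBM, XGBoost) by RMSE on a held-out validation split.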
def drop_duplicate(data, sub_set):
print('Before drop shape:', data.shape)
before = data.shape[0]
data.drop_duplicates(sub_set, keep='first', inplace=True)
data.reset_index(drop=True, inplace=True)
print('After drop shape:', data.shape)
after = data.shape[0]
print('Total Duplicate:', before - after)
def rmse(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
class predict(object):
def __init__(self,trainfile,testfile):
self.trainfile = trainfile
self.testfile = testfile
self.__lr = LinearRegression()
# self.__dtree = DecisionTreeClassifier()
# self.__rforest = RandomForestClassifier()
# self.__svm = SVC(kernel='rbf')
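		# LightGBM booster parameters: MSE objective, RMSE eval metric.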
self.lgb_params = {
'feature_fraction': 1,
'metric': 'rmse',
'min_data_in_leaf': 16,
'bagging_fraction': 0.85,
'learning_rate': 0.03,
'objective': 'mse',
'bagging_seed': 2 ** 7,
'num_leaves': 32,
'bagging_freq': 3,
'verbose': 0
}
self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38,random_state=50)
		self.__xgb = XGBRegressor(max_depth=8,n_estimators=1000,min_child_weight=300,colsample_bytree=0.9,subsample=0.9,learning_rate=0.15,random_state=42)
self.train_data = None
self.train_labels = None
self.train_data1 = None
self.train_labels1 = None
self.val_data = None
self.val_labels = None
self.test_data = None
self.predicted_labels = None
self.x_train_val = None
self.y_train_val = None
def trainingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.trainfile,parse_dates=['date'],date_parser=parser)
df = df.dropna()
df = df.loc[df['item_cnt_day']>0]
subset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']
drop_duplicate(df, sub_set=subset_train)
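		# Impute negative prices with the median price observed for the same item/shop/month slice.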
median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()
df.loc[df.item_price < 0, 'item_price'] = median
df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)
df['item_price'] = df['item_price'].clip(0, 300000)
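		# Remap shop ids that appear to be duplicates of the same store (0 -> 57, 1 -> 58, 10 -> 11).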
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
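		# Split the date into integer day/month/year features.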
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day','month','year','item_id', 'shop_id','item_price','item_cnt_day']]
df['item_id'] = np.log1p(df['item_id'])
self.train_labels1 = df['item_cnt_day']
self.train_data1 = df.drop(columns='item_cnt_day')
self.train_data,self.val_data,self.train_labels,self.val_labels=train_test_split(self.train_data1,self.train_labels1,test_size=0.3)
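		# Keep the last 100 training rows as a small eval set for XGBoost early stopping.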
self.x_train_val = self.train_data[-100:]
self.y_train_val = self.train_labels[-100:]
def testingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.testfile,parse_dates=['date'],date_parser=parser)
subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']
drop_duplicate(df, sub_set=subset_test)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day','month','year','item_id', 'shop_id','item_price']]
df['item_id'] = np.log1p(df['item_id'])
		self.test_data = df
def data(self):
self.trainingdata()
self.testingdata()
def trainLinearRegression(self):
self.__lr.fit(self.train_data,self.train_labels)
def testLinearRegression(self):
self.predicted_labels = self.__lr.predict(self.val_data)
# print ("Linear Regression score " + str(self.__lr.score(self.val_data, self.val_labels)))
print ("Linear Regression score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainExtraTreeRegressor(self):
self.__tree_reg.fit(self.train_data,self.train_labels)
def testExtraTreeRegressor(self):
self.predicted_labels = self.__tree_reg.predict(self.val_data)
print ("ExtraTreeRegressor score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainLightGBM(self):
		train_set = lgb.Dataset(self.train_data, label=self.train_labels)
		self.__lgb_model = lgb.train(self.lgb_params, train_set, num_boost_round=300)
def testLightGBM(self):
		self.predicted_labels = self.__lgb_model.predict(self.val_data)
print ("LightGBM score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainXGBoost(self):
self.__xgb.fit(self.train_data,self.train_labels,eval_metric="rmse",eval_set=[(self.train_data, self.train_labels), (self.x_train_val, self.y_train_val)],verbose=True,early_stopping_rounds=10)
def testXGBoost(self):
self.predicted_labels = self.__xgb.predict(self.val_data)
print ("XGBoost score " + str(rmse(self.predicted_labels,self.val_labels)))
if __name__ == "__main__":
train_data_name = sys.argv[1]
test_data_name = sys.argv[2]
model = predict(train_data_name,test_data_name)
model.data()
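	# Uncomment one of the train/test pairs below to fit a model and print its validation RMSE.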
# model.trainLinearRegression()
# model.testLinearRegression()
# model.trainExtraTreeRegressor()
# model.testExtraTreeRegressor()
# model.trainLightGBM()
# model.testLightGBM()
# model.trainXGBoost()
# model.testXGBoost()
# plotConfusionMatrix(model.test_labels,model.predicted_labels)
	# model.trainDecisionTree()
	# model.testDecisionTree()
	# model.trainRandomForest()
	# model.testRandomForest()
# model.trainSVM()
# model.testSVM()
|
normal
|
{
"blob_id": "ee49ce63951721458cb98b370285d04231bb2c20",
"index": 7438,
"step-1": "<mask token>\n\n\nclass predict(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n <mask token>\n <mask token>\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n <mask token>\n <mask token>\n <mask token>\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n <mask token>\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n <mask token>\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\n\n<mask token>\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n\n def trainingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=\n parser)\n df = df.dropna()\n df = df.loc[df['item_cnt_day'] > 0]\n subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',\n 'item_cnt_day']\n drop_duplicate(df, sub_set=subset_train)\n median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.\n date_block_num == 4) & (df.item_price > 0)].item_price.median()\n df.loc[df.item_price < 0, 'item_price'] = median\n df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n df['item_price'] = df['item_price'].clip(0, 300000)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',\n 'item_cnt_day']]\n df['item_id'] = np.log1p(df['item_id'])\n self.train_labels1 = df['item_cnt_day']\n self.train_data1 = df.drop(columns='item_cnt_day')\n (self.train_data, self.val_data, self.train_labels, self.val_labels\n ) = (train_test_split(self.train_data1, self.train_labels1,\n test_size=0.3))\n self.x_train_val = self.train_data[-100:]\n self.y_train_val = self.train_labels[-100:]\n\n def testingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=\n parser)\n subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']\n drop_duplicate(df, sub_set=subset_test)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 
'item_id', 'shop_id', 'item_price']]\n df['item_id'] = np.log1p(df['item_id'])\n self.test_data = df\n\n def data(self):\n self.trainingdata()\n self.testingdata()\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n\n def testLinearRegression(self):\n self.predicted_labels = self.__lr.predict(self.val_data)\n print('Linear Regression score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n\n def testExtraTreeRegressor(self):\n self.predicted_labels = self.__tree_reg.predict(self.val_data)\n print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\n<mask token>\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport numpy.random as nr\nimport math\nimport os\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nimport sys\nimport time\nimport imp\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor, plot_importance\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\n\ndef rmse(predictions, targets):\n return np.sqrt(np.mean((predictions - targets) ** 2))\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n\n def trainingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=\n parser)\n df = df.dropna()\n df = df.loc[df['item_cnt_day'] > 0]\n subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',\n 'item_cnt_day']\n drop_duplicate(df, sub_set=subset_train)\n median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.\n date_block_num == 4) & (df.item_price > 0)].item_price.median()\n df.loc[df.item_price < 0, 'item_price'] = median\n df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n df['item_price'] = df['item_price'].clip(0, 300000)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',\n 'item_cnt_day']]\n df['item_id'] = np.log1p(df['item_id'])\n self.train_labels1 = df['item_cnt_day']\n self.train_data1 = df.drop(columns='item_cnt_day')\n (self.train_data, self.val_data, self.train_labels, self.val_labels\n ) = (train_test_split(self.train_data1, self.train_labels1,\n test_size=0.3))\n self.x_train_val = self.train_data[-100:]\n self.y_train_val = self.train_labels[-100:]\n\n def testingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=\n parser)\n subset_test 
= ['date', 'date_block_num', 'shop_id', 'item_id']\n drop_duplicate(df, sub_set=subset_test)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price']]\n df['item_id'] = np.log1p(df['item_id'])\n self.test_data = df\n\n def data(self):\n self.trainingdata()\n self.testingdata()\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n\n def testLinearRegression(self):\n self.predicted_labels = self.__lr.predict(self.val_data)\n print('Linear Regression score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n\n def testExtraTreeRegressor(self):\n self.predicted_labels = self.__tree_reg.predict(self.val_data)\n print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\nif __name__ == '__main__':\n train_data_name = sys.argv[1]\n test_data_name = sys.argv[2]\n model = predict(train_data_name, test_data_name)\n model.data()\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport numpy.random as nr\nimport math\nimport os\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nimport sys\nimport time\nimport imp\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor, plot_importance\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\n\n\n\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\ndef rmse(predictions, targets):\n return np.sqrt(np.mean((predictions - targets) ** 2))\n\n\nclass predict(object):\n\n\tdef __init__(self,trainfile,testfile):\n\t\tself.trainfile = trainfile\n\t\tself.testfile = testfile\n\t\tself.__lr = LinearRegression()\n\t\t# self.__dtree = DecisionTreeClassifier()\n\t\t# self.__rforest = RandomForestClassifier()\n\t\t# self.__svm = SVC(kernel='rbf')\n\t\tself.lgb_params = {\n 'feature_fraction': 1,\n 'metric': 'rmse',\n 'min_data_in_leaf': 16,\n 'bagging_fraction': 0.85,\n 'learning_rate': 0.03,\n 'objective': 'mse',\n 'bagging_seed': 2 ** 7,\n 'num_leaves': 32,\n 'bagging_freq': 3,\n 'verbose': 0\n \t}\n\t\tself.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38,random_state=50)\n\t\tself._xgb = XGBRegressor(max_depth=8,n_estimators=1000,min_child_weight=300,colsample_bytree=0.9,subsample=0.9,eta=0.15,seed=42)\n\t\tself.train_data = None\n\t\tself.train_labels = None\n\t\tself.train_data1 = None\n\t\tself.train_labels1 = None\n\t\tself.val_data = None\n\t\tself.val_labels = None\n\t\tself.test_data = None\n\t\tself.predicted_labels = None\n\t\tself.x_train_val = None\n\t\tself.y_train_val = None\n\n\tdef trainingdata(self):\n\t\tparser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n\t\tdf = pd.read_csv(self.trainfile,parse_dates=['date'],date_parser=parser)\n\t\tdf = df.dropna()\n\t\tdf = df.loc[df['item_cnt_day']>0]\n\t\tsubset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']\n\t\tdrop_duplicate(df, sub_set=subset_train)\n\t\tmedian = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()\n\t\tdf.loc[df.item_price < 0, 'item_price'] = median\n\t\tdf['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n\t\tdf['item_price'] = df['item_price'].clip(0, 300000)\n\t\tdf.loc[df.shop_id == 0, 'shop_id'] = 57\n\t\tdf.loc[df.shop_id == 1, 'shop_id'] = 58\n\t\tdf.loc[df.shop_id == 10, 'shop_id'] = 11\n\t\n\t\tdf['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n\t\tdf['day'] = df['day'].astype('int64')\n\t\tdf['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n\t\tdf['month'] = df['month'].astype('int64')\n\t\tdf['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n\t\tdf['year'] = df['year'].astype('int64')\n\t\tdf = df[['day','month','year','item_id', 'shop_id','item_price','item_cnt_day']]\n\t\tdf['item_id'] = np.log1p(df['item_id'])\n\t\tself.train_labels1 = df['item_cnt_day']\n\t\tself.train_data1 = df.drop(columns='item_cnt_day')\n\t\tself.train_data,self.val_data,self.train_labels,self.val_labels=train_test_split(self.train_data1,self.train_labels1,test_size=0.3)\n\t\tself.x_train_val = 
self.train_data[-100:]\n\t\tself.y_train_val = self.train_labels[-100:]\n\n\n\tdef testingdata(self):\n\t\tparser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n\t\tdf = pd.read_csv(self.testfile,parse_dates=['date'],date_parser=parser)\n\t\tsubset_test = ['date', 'date_block_num', 'shop_id', 'item_id']\n\t\tdrop_duplicate(df, sub_set=subset_test)\n\t\tdf.loc[df.shop_id == 0, 'shop_id'] = 57\n\t\tdf.loc[df.shop_id == 1, 'shop_id'] = 58\n\t\tdf.loc[df.shop_id == 10, 'shop_id'] = 11\n\t\tdf['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n\t\tdf['day'] = df['day'].astype('int64')\n\t\tdf['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n\t\tdf['month'] = df['month'].astype('int64')\n\t\tdf['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n\t\tdf['year'] = df['year'].astype('int64')\n\t\tdf = df[['day','month','year','item_id', 'shop_id','item_price']]\n\t\tdf['item_id'] = np.log1p(df['item_id'])\n\t\tself.test_data = df;\n\n\tdef data(self):\n\t\tself.trainingdata()\n\t\tself.testingdata()\n\n\tdef trainLinearRegression(self):\n\t\tself.__lr.fit(self.train_data,self.train_labels)\n\n\tdef testLinearRegression(self):\n\t\tself.predicted_labels = self.__lr.predict(self.val_data)\n\t\t# print (\"Linear Regression score \" + str(self.__lr.score(self.val_data, self.val_labels)))\n\t\tprint (\"Linear Regression score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainExtraTreeRegressor(self):\n\t\tself.__tree_reg.fit(self.train_data,self.train_labels)\n\n\tdef testExtraTreeRegressor(self):\n\t\tself.predicted_labels = self.__tree_reg.predict(self.val_data)\n\t\tprint (\"ExtraTreeRegressor score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainLightGBM(self):\n\t\tlgb.train(self.lgb_params,lgb.dataset(self.train_data,label=train_labels),300)\n\n\tdef testLightGBM(self):\n\t\tself.predicted_labels = lgb.predict(self.val_data)\n\t\tprint (\"LightGBM score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainXGBoost(self):\n\t\tself.__xgb.fit(self.train_data,self.train_labels,eval_metric=\"rmse\",eval_set=[(self.train_data, self.train_labels), (self.x_train_val, self.y_train_val)],verbose=True,early_stopping_rounds=10)\n\n\tdef testXGBoost(self):\n\t\tself.predicted_labels = self.__xgb.predict(self.val_data)\n\t\tprint (\"XGBoost score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\ttrain_data_name = sys.argv[1]\n\ttest_data_name = sys.argv[2]\n\tmodel = predict(train_data_name,test_data_name)\n\tmodel.data()\n\t# model.trainLinearRegression()\n\t# model.testLinearRegression()\n\n\t# model.trainExtraTreeRegressor()\n\t# model.testExtraTreeRegressor()\n\n\t# model.trainLightGBM()\n\t# model.testLightGBM()\n\n\t# model.trainXGBoost()\n\t# model.testXGBoost()\n\n\n\t# plotConfusionMatrix(model.test_labels,model.predicted_labels)\n\t\n\t# model.trainDecesionTree()\n\t# model.testDecesionTree()\n\n\t# model.trainRandomForrest()\n\t# model.testRandomForrest()\n\n\t# model.trainSVM()\n\t# model.testSVM()\n\n\n\n\n\n",
"step-ids": [
4,
8,
14,
17,
18
]
}
|
[
4,
8,
14,
17,
18
] |
import torch
from torch import nn
from torch.nn import functional as F
from models.blocks import UnetConv3, MultiAttentionBlock, UnetGridGatingSignal3, UnetUp3_CT, UnetDsv3
class AttentionGatedUnet3D(nn.Module):
"""
Attention Gated Unet for 3D semantic segmentation.
Args:
config: Must contain following attributes:
num_classes (int): Number of output classes in the mask;
in_channels (int): Number of channels in the input image;
feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;
is_deconv (bool, optional): whether to use DeConvolutions;
is_batchnorm (bool, optional): whether to use Batch Normalization;
Attributes:
num_classes (int): Number of classes in the output mask
in_channels (int): Number of channels in the input image
is_batchnorm (bool)
is_deconv (bool)
feature_scale (int)
"""
def __init__(self, config):
super(AttentionGatedUnet3D, self).__init__()
assert hasattr(config, "num_classes")
assert hasattr(config, "in_channels")
if not hasattr(config, "feature_scale"):
print("feature_scale not specified in config, setting to default 4")
config.feature_scale = 4
if not hasattr(config, "is_deconv"):
print("is_deconv not specified in config, setting to default True")
config.is_deconv = True
if not hasattr(config, "is_batchnorm"):
print("is_batchnorm not specified in config, setting to default True")
config.is_batchnorm = True
self.num_classes = config.num_classes
self.in_channels = config.in_channels
self.is_deconv = config.is_deconv
self.is_batchnorm = config.is_batchnorm
self.feature_scale = config.feature_scale
nonlocal_mode = 'concatenation'
attention_dsample = (2, 2, 2)
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# downsampling
self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)
self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)
self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)
self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)
self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))
self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)
self.gating = UnetGridGatingSignal3(filters[4], filters[4], kernel_size=(1, 1, 1),
is_batchnorm=self.is_batchnorm)
# attention blocks
self.attentionblock2 = MultiAttentionBlock(in_size=filters[1], gate_size=filters[2], inter_size=filters[1],
nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)
self.attentionblock3 = MultiAttentionBlock(in_size=filters[2], gate_size=filters[3], inter_size=filters[2],
nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)
self.attentionblock4 = MultiAttentionBlock(in_size=filters[3], gate_size=filters[4], inter_size=filters[3],
nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)
# upsampling
self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)
self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)
self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)
self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)
# deep supervision
self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes, scale_factor=8)
self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes, scale_factor=4)
self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes, scale_factor=2)
self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.num_classes, kernel_size=1)
# final conv (without any concat)
self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)
# initialise weights
for m in self.modules():
if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):
classname = m.__class__.__name__
# print(classname)
                if classname.find('Conv') != -1:
                    nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif classname.find('Linear') != -1:
                    nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif classname.find('BatchNorm') != -1:
                    nn.init.normal_(m.weight.data, 1.0, 0.02)
                    nn.init.constant_(m.bias.data, 0.0)
def forward(self, inputs):
# Feature Extraction
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
# Gating Signal Generation
center = self.center(maxpool4)
gating = self.gating(center)
# Attention Mechanism
# Upscaling Part (Decoder)
g_conv4, att4 = self.attentionblock4(conv4, gating)
up4 = self.up_concat4(g_conv4, center)
g_conv3, att3 = self.attentionblock3(conv3, up4)
up3 = self.up_concat3(g_conv3, up4)
g_conv2, att2 = self.attentionblock2(conv2, up3)
up2 = self.up_concat2(g_conv2, up3)
up1 = self.up_concat1(conv1, up2)
# Deep Supervision
dsv4 = self.dsv4(up4)
dsv3 = self.dsv3(up3)
dsv2 = self.dsv2(up2)
dsv1 = self.dsv1(up1)
final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))
pred = F.softmax(final, dim=1)
return pred
# @staticmethod
# def apply_argmax_softmax(pred):
# log_p = F.softmax(pred, dim=1)
# return log_p
|
normal
|
{
"blob_id": "55a392d63838cbef027f9cf525999c41416e3575",
"index": 3875,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AttentionGatedUnet3D(nn.Module):\n <mask token>\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, 'num_classes')\n assert hasattr(config, 'in_channels')\n if not hasattr(config, 'feature_scale'):\n print('feature_scale not specified in config, setting to default 4'\n )\n config.feature_scale = 4\n if not hasattr(config, 'is_deconv'):\n print('is_deconv not specified in config, setting to default True')\n config.is_deconv = True\n if not hasattr(config, 'is_batchnorm'):\n print(\n 'is_batchnorm not specified in config, setting to default True'\n )\n config.is_batchnorm = True\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n nonlocal_mode = 'concatenation'\n attention_dsample = 2, 2, 2\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4],\n kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1],\n gate_size=filters[2], inter_size=filters[1], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock3 = MultiAttentionBlock(in_size=filters[2],\n gate_size=filters[3], inter_size=filters[2], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3],\n gate_size=filters[4], inter_size=filters[3], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes,\n scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes,\n scale_factor=4)\n self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes,\n scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.\n num_classes, kernel_size=1)\n self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1)\n maxpool2 = 
self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n center = self.center(maxpool4)\n gating = self.gating(center)\n g_conv4, att4 = self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n",
"step-3": "<mask token>\n\n\nclass AttentionGatedUnet3D(nn.Module):\n \"\"\"\n Attention Gated Unet for 3D semantic segmentation.\n\n Args:\n config: Must contain following attributes:\n num_classes (int): Number of output classes in the mask;\n in_channels (int): Number of channels in the input image;\n feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;\n is_deconv (bool, optional): whether to use DeConvolutions;\n is_batchnorm (bool, optional): whether to use Batch Normalization;\n\n Attributes:\n num_classes (int): Number of classes in the output mask\n in_channels (int): Number of channels in the input image\n is_batchnorm (bool)\n is_deconv (bool)\n feature_scale (int)\n \"\"\"\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, 'num_classes')\n assert hasattr(config, 'in_channels')\n if not hasattr(config, 'feature_scale'):\n print('feature_scale not specified in config, setting to default 4'\n )\n config.feature_scale = 4\n if not hasattr(config, 'is_deconv'):\n print('is_deconv not specified in config, setting to default True')\n config.is_deconv = True\n if not hasattr(config, 'is_batchnorm'):\n print(\n 'is_batchnorm not specified in config, setting to default True'\n )\n config.is_batchnorm = True\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n nonlocal_mode = 'concatenation'\n attention_dsample = 2, 2, 2\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4],\n kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1],\n gate_size=filters[2], inter_size=filters[1], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock3 = MultiAttentionBlock(in_size=filters[2],\n gate_size=filters[3], inter_size=filters[2], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3],\n gate_size=filters[4], inter_size=filters[3], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes,\n scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes,\n scale_factor=4)\n self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes,\n scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.\n 
num_classes, kernel_size=1)\n self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n center = self.center(maxpool4)\n gating = self.gating(center)\n g_conv4, att4 = self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n",
"step-4": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom models.blocks import UnetConv3, MultiAttentionBlock, UnetGridGatingSignal3, UnetUp3_CT, UnetDsv3\n\n\nclass AttentionGatedUnet3D(nn.Module):\n \"\"\"\n Attention Gated Unet for 3D semantic segmentation.\n\n Args:\n config: Must contain following attributes:\n num_classes (int): Number of output classes in the mask;\n in_channels (int): Number of channels in the input image;\n feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;\n is_deconv (bool, optional): whether to use DeConvolutions;\n is_batchnorm (bool, optional): whether to use Batch Normalization;\n\n Attributes:\n num_classes (int): Number of classes in the output mask\n in_channels (int): Number of channels in the input image\n is_batchnorm (bool)\n is_deconv (bool)\n feature_scale (int)\n \"\"\"\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, 'num_classes')\n assert hasattr(config, 'in_channels')\n if not hasattr(config, 'feature_scale'):\n print('feature_scale not specified in config, setting to default 4'\n )\n config.feature_scale = 4\n if not hasattr(config, 'is_deconv'):\n print('is_deconv not specified in config, setting to default True')\n config.is_deconv = True\n if not hasattr(config, 'is_batchnorm'):\n print(\n 'is_batchnorm not specified in config, setting to default True'\n )\n config.is_batchnorm = True\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n nonlocal_mode = 'concatenation'\n attention_dsample = 2, 2, 2\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4],\n kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1],\n gate_size=filters[2], inter_size=filters[1], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock3 = MultiAttentionBlock(in_size=filters[2],\n gate_size=filters[3], inter_size=filters[2], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3],\n gate_size=filters[4], inter_size=filters[3], nonlocal_mode=\n nonlocal_mode, sub_sample_factor=attention_dsample)\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes,\n scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes,\n scale_factor=4)\n 
self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes,\n scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.\n num_classes, kernel_size=1)\n self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n center = self.center(maxpool4)\n gating = self.gating(center)\n g_conv4, att4 = self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n",
"step-5": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom models.blocks import UnetConv3, MultiAttentionBlock, UnetGridGatingSignal3, UnetUp3_CT, UnetDsv3\n\n\nclass AttentionGatedUnet3D(nn.Module):\n \"\"\"\n Attention Gated Unet for 3D semantic segmentation.\n\n Args:\n config: Must contain following attributes:\n num_classes (int): Number of output classes in the mask;\n in_channels (int): Number of channels in the input image;\n feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;\n is_deconv (bool, optional): whether to use DeConvolutions;\n is_batchnorm (bool, optional): whether to use Batch Normalization;\n\n Attributes:\n num_classes (int): Number of classes in the output mask\n in_channels (int): Number of channels in the input image\n is_batchnorm (bool)\n is_deconv (bool)\n feature_scale (int)\n \"\"\"\n\n def __init__(self, config):\n super(AttentionGatedUnet3D, self).__init__()\n assert hasattr(config, \"num_classes\")\n assert hasattr(config, \"in_channels\")\n\n if not hasattr(config, \"feature_scale\"):\n print(\"feature_scale not specified in config, setting to default 4\")\n config.feature_scale = 4\n\n if not hasattr(config, \"is_deconv\"):\n print(\"is_deconv not specified in config, setting to default True\")\n config.is_deconv = True\n\n if not hasattr(config, \"is_batchnorm\"):\n print(\"is_batchnorm not specified in config, setting to default True\")\n config.is_batchnorm = True\n\n self.num_classes = config.num_classes\n self.in_channels = config.in_channels\n\n self.is_deconv = config.is_deconv\n self.is_batchnorm = config.is_batchnorm\n self.feature_scale = config.feature_scale\n\n nonlocal_mode = 'concatenation'\n attention_dsample = (2, 2, 2)\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))\n\n self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)\n self.gating = UnetGridGatingSignal3(filters[4], filters[4], kernel_size=(1, 1, 1),\n is_batchnorm=self.is_batchnorm)\n\n # attention blocks\n self.attentionblock2 = MultiAttentionBlock(in_size=filters[1], gate_size=filters[2], inter_size=filters[1],\n nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock3 = MultiAttentionBlock(in_size=filters[2], gate_size=filters[3], inter_size=filters[2],\n nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)\n self.attentionblock4 = MultiAttentionBlock(in_size=filters[3], gate_size=filters[4], inter_size=filters[3],\n nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)\n\n # upsampling\n self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)\n\n # deep supervision\n self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes, 
scale_factor=8)\n self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes, scale_factor=4)\n self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes, scale_factor=2)\n self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.num_classes, kernel_size=1)\n\n # final conv (without any concat)\n self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)\n\n # initialise weights\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n nn.init.normal(m.weight.data, 1.0, 0.02)\n nn.init.constant(m.bias.data, 0.0)\n\n def forward(self, inputs):\n # Feature Extraction\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n\n # Gating Signal Generation\n center = self.center(maxpool4)\n gating = self.gating(center)\n\n # Attention Mechanism\n # Upscaling Part (Decoder)\n g_conv4, att4 = self.attentionblock4(conv4, gating)\n up4 = self.up_concat4(g_conv4, center)\n g_conv3, att3 = self.attentionblock3(conv3, up4)\n up3 = self.up_concat3(g_conv3, up4)\n g_conv2, att2 = self.attentionblock2(conv2, up3)\n up2 = self.up_concat2(g_conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n\n # Deep Supervision\n dsv4 = self.dsv4(up4)\n dsv3 = self.dsv3(up3)\n dsv2 = self.dsv2(up2)\n dsv1 = self.dsv1(up1)\n final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))\n pred = F.softmax(final, dim=1)\n return pred\n\n# @staticmethod\n# def apply_argmax_softmax(pred):\n# log_p = F.softmax(pred, dim=1)\n\n# return log_p\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
#!/usr/bin/env python3
from pexpect import pxssh
import time
s = pxssh.pxssh()
ip = "" #replace ip address
username= "" #replace username
password= "" #replace password
s.login (ip, username, password)
print ("SSH session login successful")
s.sendline ('application stop')
s.prompt() # match the prompt
print("Stopping the app")
print("\nStarting the app")
s.sendline ('application start')
s.prompt()
print ("\nLogout")
s.logout()
|
normal
|
{
"blob_id": "dd9574ea08beb9bc5f1413afd63c751fd42cba67",
"index": 6406,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-3": "<mask token>\ns = pxssh.pxssh()\nip = ''\nusername = ''\npassword = ''\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-4": "from pexpect import pxssh\nimport time\ns = pxssh.pxssh()\nip = ''\nusername = ''\npassword = ''\ns.login(ip, username, password)\nprint('SSH session login successful')\ns.sendline('application stop')\ns.prompt()\nprint('Stopping the app')\nprint(\"\"\"\nStarting the app\"\"\")\ns.sendline('application start')\ns.prompt()\nprint('\\nLogout')\ns.logout()\n",
"step-5": "#!/usr/bin/env python3\n\nfrom pexpect import pxssh\nimport time\ns = pxssh.pxssh()\nip = \"\" #replace ip address\nusername= \"\" #replace username\npassword= \"\" #replace password\ns.login (ip, username, password)\nprint (\"SSH session login successful\")\ns.sendline ('application stop')\ns.prompt() # match the prompt\nprint(\"Stopping the app\")\n\nprint(\"\\nStarting the app\") \ns.sendline ('application start')\ns.prompt() \nprint (\"\\nLogout\")\ns.logout()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import time
class Block:
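    """A block in a simple blockchain: stores its index, the list of transactions,
    a creation timestamp, the previous block's hash, a nonce (typically used for
    proof-of-work), and its own hash, left as None until it is computed."""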
def __init__(self, index, transactions, previous_hash, nonce=0):
self.index = index
self.transaction = transactions
self.timestamp = time.time()
self.previous_hash = previous_hash
self.nonce = nonce
self.hash = None
|
normal
|
{
"blob_id": "43a23958b8c8779e3292f0f523a37b6d712fdbac",
"index": 4448,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Block:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Block:\n\n def __init__(self, index, transactions, previous_hash, nonce=0):\n self.index = index\n self.transaction = transactions\n self.timestamp = time.time()\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.hash = None\n",
"step-4": "import time\n\n\nclass Block:\n\n def __init__(self, index, transactions, previous_hash, nonce=0):\n self.index = index\n self.transaction = transactions\n self.timestamp = time.time()\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.hash = None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
import sys
import yaml
def add_sub_path(yaml_path):
file = open(yaml_path, "r", encoding="utf-8")
file_data = file.read()
file.close()
data = yaml.safe_load(file_data)
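    # Rewrite every operation's backend path so it is served under the {env.api_sub_path} prefix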
for p, p_info in data.get("paths", {}).items():
for method, m_info in p_info.items():
url_path = m_info["x-bk-apigateway-resource"]["backend"]["path"]
m_info["x-bk-apigateway-resource"]["backend"]["path"] = "{}{}".format(
"/{env.api_sub_path}", url_path[0:]
)
file = open(yaml_path, "w")
yaml.dump(data, file)
file.close()
if __name__ == "__main__":
    # Add the env.api_sub_path prefix to all paths
path = sys.argv[1]
add_sub_path(path)
|
normal
|
{
"blob_id": "bbd50c40bc0897fe7a93f277bcfdcba3ba6d6f2a",
"index": 1531,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_sub_path(yaml_path):\n file = open(yaml_path, 'r', encoding='utf-8')\n file_data = file.read()\n file.close()\n data = yaml.safe_load(file_data)\n for p, p_info in data.get('paths', {}).items():\n for method, m_info in p_info.items():\n url_path = m_info['x-bk-apigateway-resource']['backend']['path']\n m_info['x-bk-apigateway-resource']['backend']['path'\n ] = '{}{}'.format('/{env.api_sub_path}', url_path[0:])\n file = open(yaml_path, 'w')\n yaml.dump(data, file)\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add_sub_path(yaml_path):\n file = open(yaml_path, 'r', encoding='utf-8')\n file_data = file.read()\n file.close()\n data = yaml.safe_load(file_data)\n for p, p_info in data.get('paths', {}).items():\n for method, m_info in p_info.items():\n url_path = m_info['x-bk-apigateway-resource']['backend']['path']\n m_info['x-bk-apigateway-resource']['backend']['path'\n ] = '{}{}'.format('/{env.api_sub_path}', url_path[0:])\n file = open(yaml_path, 'w')\n yaml.dump(data, file)\n file.close()\n\n\nif __name__ == '__main__':\n path = sys.argv[1]\n add_sub_path(path)\n",
"step-4": "import sys\nimport yaml\n\n\ndef add_sub_path(yaml_path):\n file = open(yaml_path, 'r', encoding='utf-8')\n file_data = file.read()\n file.close()\n data = yaml.safe_load(file_data)\n for p, p_info in data.get('paths', {}).items():\n for method, m_info in p_info.items():\n url_path = m_info['x-bk-apigateway-resource']['backend']['path']\n m_info['x-bk-apigateway-resource']['backend']['path'\n ] = '{}{}'.format('/{env.api_sub_path}', url_path[0:])\n file = open(yaml_path, 'w')\n yaml.dump(data, file)\n file.close()\n\n\nif __name__ == '__main__':\n path = sys.argv[1]\n add_sub_path(path)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport sys\nimport yaml\n\n\ndef add_sub_path(yaml_path):\n file = open(yaml_path, \"r\", encoding=\"utf-8\")\n file_data = file.read()\n file.close()\n\n data = yaml.safe_load(file_data)\n\n for p, p_info in data.get(\"paths\", {}).items():\n for method, m_info in p_info.items():\n url_path = m_info[\"x-bk-apigateway-resource\"][\"backend\"][\"path\"]\n m_info[\"x-bk-apigateway-resource\"][\"backend\"][\"path\"] = \"{}{}\".format(\n \"/{env.api_sub_path}\", url_path[0:]\n )\n\n file = open(yaml_path, \"w\")\n yaml.dump(data, file)\n file.close()\n\n\nif __name__ == \"__main__\":\n # 为所有path添加env.api_sub_path前缀\n path = sys.argv[1]\n add_sub_path(path)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" Crie um programa onde o usuario possa digitar sete valores numericos e cadastre-os em uma lisa unicaque mantenha
separados os valores pares e impares. No final, mostre os valores pares e impares em ordem crescente """
n = [[],[]]
for c in range(0,7):
num = int(input(f'Digite o {c+1} valor: '))
res = num % 2
if res == 0:
n[0].append(num)
else:
n[1].append(num)
n[0].sort()
n[1].sort()
print(f'Numeros pares: {n[0]}')
print(f'Numeros impares {n[1]}')
|
normal
|
{
"blob_id": "72bbbe78db746febc9a36a676e0fa2d97bf5e81e",
"index": 8849,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c in range(0, 7):\n num = int(input(f'Digite o {c + 1} valor: '))\n res = num % 2\n if res == 0:\n n[0].append(num)\n else:\n n[1].append(num)\nn[0].sort()\nn[1].sort()\nprint(f'Numeros pares: {n[0]}')\nprint(f'Numeros impares {n[1]}')\n",
"step-3": "<mask token>\nn = [[], []]\nfor c in range(0, 7):\n num = int(input(f'Digite o {c + 1} valor: '))\n res = num % 2\n if res == 0:\n n[0].append(num)\n else:\n n[1].append(num)\nn[0].sort()\nn[1].sort()\nprint(f'Numeros pares: {n[0]}')\nprint(f'Numeros impares {n[1]}')\n",
"step-4": "\"\"\" Crie um programa onde o usuario possa digitar sete valores numericos e cadastre-os em uma lisa unicaque mantenha\nseparados os valores pares e impares. No final, mostre os valores ares e impares em ordem crescente \"\"\"\n\nn = [[],[]]\n\nfor c in range(0,7):\n num = int(input(f'Digite o {c+1} valor: '))\n res = num % 2\n if res == 0:\n n[0].append(num)\n else:\n n[1].append(num)\nn[0].sort()\nn[1].sort()\nprint(f'Numeros pares: {n[0]}')\nprint(f'Numeros impares {n[1]}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import datetime
import discord
def getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge, outlaws, spark,
             spitfire, excelsior, eternal, fusion, dynasty, shock, dragons, defiant, valiant, titans,
             justice):
teamList = discord.Embed(
title="Overwatch League Teams",
        description="2021 Season\n"+
        "**"+reign+"ATL-Atlanta Reign**\n"+
        "**"+uprising+"BOS-Boston Uprising**\n"+
        "**"+hunters+"CDH-Chengdu Hunters**\n"+
        "**"+fuel+"DAL-Dallas Fuel**\n"+
        "**"+mayhem+"FLA-Florida Mayhem**\n"+
        "**"+gladiators+"GLA-Los Angeles Gladiators**\n"+
        "**"+charge+"GZC-Guangzhou Charge**\n"+
        "**"+outlaws+"HOU-Houston Outlaws**\n"+
        "**"+spark+"HZS-Hangzhou Spark**\n"+
        "**"+spitfire+"LDN-London Spitfire**\n"+
        "**"+excelsior+"NYE-New York Excelsior**\n"+
        "**"+eternal+"PAR-Paris Eternal**\n"+
        "**"+fusion+"PHI-Philadelphia Fusion**\n"+
        "**"+dynasty+"SEO-Seoul Dynasty**\n"+
        "**"+shock+"SFS-San Francisco Shock**\n"+
        "**"+dragons+"SHD-Shanghai Dragons**\n"+
        "**"+defiant+"TOR-Toronto Defiant**\n"+
"**"+valiant+"VAL-Los Angeles Valiant**\n"+
"**"+titans+"VAN-Vancouver Titans**\n"+
"**"+justice+"WAS-Washington Justice**",
color=discord.Colour.gold(),
timestamp=datetime.datetime.utcnow()
)
return teamList
|
normal
|
{
"blob_id": "9a02e09cbfe2c9b6ebb9d20ba6cea639871f0838",
"index": 7647,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,\n outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,\n dragons, defiant, valiant, titans, justice):\n teamList = discord.Embed(title='Overwatch League Teams', description=\n '2021 Season\\n' + '**' + reign + \"\"\"ATL-Atlanta Reign**\n\"\"\" + '**' +\n uprising + 'BOS-Boston Uprising**\\n' + '**' + hunters +\n 'CDH-Chengdu Hunters**\\n' + '**' + fuel + \"\"\"DAL-Dallas Fuel**\n\"\"\" +\n '**' + mayhem + 'FLA-Florida Mayhem**\\n' + '**' + gladiators +\n \"\"\"GLA-Los Angeles Gladiators**\n\"\"\" + '**' + charge +\n 'GZC-Guangzhou Charge**\\n' + '**' + outlaws +\n 'HOU-Houston Outlaws**\\n' + '**' + spark +\n \"\"\"HZS-Hangzhou Spark**\n\"\"\" + '**' + spitfire +\n 'LDN-London Spitfire**\\n' + '**' + excelsior +\n 'NYE-New York Excelsior**\\n' + '**' + eternal +\n \"\"\"PAR-Paris Eternal**\n\"\"\" + '**' + fusion +\n 'PHI-Philadelphia Fustion**\\n' + '**' + dynasty +\n 'SEO-Seoul Dynasty**\\n' + '**' + shock +\n \"\"\"SFS-San Francisco Shock**\n\"\"\" + '**' + dragons +\n 'SHD-Shanghai Dragons**\\n' + '**' + defiant +\n \"\"\"TOR-Toronto Defiant**\n**\"\"\" + valiant +\n 'VAL-Los Angeles Valiant**\\n' + '**' + titans +\n 'VAN-Vancouver Titans**\\n' + '**' + justice +\n 'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=\n datetime.datetime.utcnow())\n return teamList\n",
"step-3": "import datetime\nimport discord\n\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,\n outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,\n dragons, defiant, valiant, titans, justice):\n teamList = discord.Embed(title='Overwatch League Teams', description=\n '2021 Season\\n' + '**' + reign + \"\"\"ATL-Atlanta Reign**\n\"\"\" + '**' +\n uprising + 'BOS-Boston Uprising**\\n' + '**' + hunters +\n 'CDH-Chengdu Hunters**\\n' + '**' + fuel + \"\"\"DAL-Dallas Fuel**\n\"\"\" +\n '**' + mayhem + 'FLA-Florida Mayhem**\\n' + '**' + gladiators +\n \"\"\"GLA-Los Angeles Gladiators**\n\"\"\" + '**' + charge +\n 'GZC-Guangzhou Charge**\\n' + '**' + outlaws +\n 'HOU-Houston Outlaws**\\n' + '**' + spark +\n \"\"\"HZS-Hangzhou Spark**\n\"\"\" + '**' + spitfire +\n 'LDN-London Spitfire**\\n' + '**' + excelsior +\n 'NYE-New York Excelsior**\\n' + '**' + eternal +\n \"\"\"PAR-Paris Eternal**\n\"\"\" + '**' + fusion +\n 'PHI-Philadelphia Fustion**\\n' + '**' + dynasty +\n 'SEO-Seoul Dynasty**\\n' + '**' + shock +\n \"\"\"SFS-San Francisco Shock**\n\"\"\" + '**' + dragons +\n 'SHD-Shanghai Dragons**\\n' + '**' + defiant +\n \"\"\"TOR-Toronto Defiant**\n**\"\"\" + valiant +\n 'VAL-Los Angeles Valiant**\\n' + '**' + titans +\n 'VAN-Vancouver Titans**\\n' + '**' + justice +\n 'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=\n datetime.datetime.utcnow())\n return teamList\n",
"step-4": "import datetime\nimport discord\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge, outlaws, spark,\nspitfire, excelsior, eternal, fusion, dynasty, shock, dragons, defiant, valiant, titans,\njustice) :\n teamList = discord.Embed(\n title=\"Overwatch League Teams\",\n description=\"2021 Season\\n\"+\n \"**\"+reign+\"ATL-Atlanta Reign**\\n\"+\n \"**\"+uprising+\"BOS-Boston Uprising**\\n\"+\n \"**\"+hunters+\"CDH-Chengdu Hunters**\\n\"+\n \"**\"+fuel+\"DAL-Dallas Fuel**\\n\"+\n \"**\"+mayhem+\"FLA-Florida Mayhem**\\n\"+\n \"**\"+gladiators+\"GLA-Los Angeles Gladiators**\\n\"+\n \"**\"+charge+\"GZC-Guangzhou Charge**\\n\"+\n \"**\"+outlaws+\"HOU-Houston Outlaws**\\n\"+\n \"**\"+spark+\"HZS-Hangzhou Spark**\\n\"+\n \"**\"+spitfire+\"LDN-London Spitfire**\\n\"+\n \"**\"+excelsior+\"NYE-New York Excelsior**\\n\"+\n \"**\"+eternal+\"PAR-Paris Eternal**\\n\"+\n \"**\"+fusion+\"PHI-Philadelphia Fustion**\\n\"+\n \"**\"+dynasty+\"SEO-Seoul Dynasty**\\n\"+\n \"**\"+shock+\"SFS-San Francisco Shock**\\n\"+\n \"**\"+dragons+\"SHD-Shanghai Dragons**\\n\"+\n \"**\"+defiant+\"TOR-Toronto Defiant**\\n\"\n \"**\"+valiant+\"VAL-Los Angeles Valiant**\\n\"+\n \"**\"+titans+\"VAN-Vancouver Titans**\\n\"+\n \"**\"+justice+\"WAS-Washington Justice**\",\n color=discord.Colour.gold(),\n timestamp=datetime.datetime.utcnow()\n )\n return teamList\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import ChartModule
from mesa.batchrunner import BatchRunner
from agentPortrayal import agent_portrayal
import metrics
from matplotlib import pyplot as plt
from ArchitectureModel import MASArchitecture
import os
import random
import sys
runBatch = True
architecture = 'Inter-Firm'
saveResults = True
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
if(runBatch):
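        # Batch mode: sweep factory quantity and weekly order volume over repeated runs,
        # collecting the model- and agent-level metrics defined below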
fixed_params = {'width': 60, 'height': 60,'splitSize':1,'distributed':True,'verbose':False,'searchSize':1,'batchRun':True}
variable_params = {'quantity':[10,20,50,80,100,120,150],'ordersPerWeek':[1,5,20,40,80,120]}
batch_run = BatchRunner(
MASArchitecture,
variable_params,
fixed_params,
iterations=10,
max_steps=800,
model_reporters={
"Utilisation": metrics.machineUtilisation,
"CompleteOrders": metrics.ordersComplete,
'AverageOrderWaitTime': metrics.averageOrderWaitTime,
'TotalMessagesSent': metrics.totalMessagesSent,
'AverageMessagesSent': metrics.averageMessagesSent,
"SuccessfulOrders":metrics.successfulOrders,
"noProposalOrders":metrics.noProposalOrders,
'OutsourcedOrders': metrics.outsourcedOrders,
'LateOrders':metrics.lateOrders,
'WIPBacklog':metrics.totalWIPSize,
'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,
'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,
'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,
'MaxMessagesReceivedFactory': metrics.maxMessagesReceivedByFactory,
'AverageSatisfactionScore':metrics.averageSatisfactionScore,
'AverageSuccessfulSatisfactionScore':metrics.averageSuccessfulSatisfactionScore,
'CheapOrdersWithCheapMachines':metrics.cheapOrdersWithCheapMachines,
'AsapOrdersWithFastMachines':metrics.asapOrdersWithFastMachines,
'AverageSuccessfulPrice': metrics.averageSuccessfulOrderPrice,
'AverageSuccessfulOrderPriceCheap':metrics.averageSuccessfulOrderPriceCheap,
'AverageSuccessfulOrderPriceNeutral':metrics.averageSuccessfulOrderPriceNeutral,
'AverageSuccessfulOrderPriceAsap':metrics.averageSuccessfulOrderPriceAsap,
'AverageSuccessfulMakespan': metrics.averageSuccessfulOrderMakeSpan,
'AverageSuccessfulOrderMakespanCheap':metrics.averageSuccessfulOrderMakespanCheap,
'AverageSuccessfulOrderMakespanNeutral':metrics.averageSuccessfulOrderMakespanNeutral,
'AverageSuccessfulOrderMakespanAsap':metrics.averageSuccessfulOrderMakespanAsap,
'SuccessfulAsapOrders':metrics.percentageOfSuccessfulASAPOrders,
'SuccessfulCheapOrders':metrics.percentageOfSuccessfulCheapOrders,
'SuccessfulNeutralOrders':metrics.percentageOfSuccessfulNeutralOrders
},
agent_reporters={
'id':'unique_id',
# # TODO: add in other agent reports that you would like to use
}
)
batch_run.run_all()
model_data = batch_run.get_model_vars_dataframe()
agent_data = batch_run.get_agent_vars_dataframe()
# Save results
if(saveResults):
number = 0
### CHANGE PATH TO WHERE YOU WANT RESULTS TO BE SAVED
while (os.path.exists('{}/results/test_{}'.format(dir_path,number)) == True):
number += 1
# TODO: maybe make a text file that describes the test that has been run
os.makedirs(
'{}/results/test_{}'.format(dir_path,number))
model_data.to_pickle(
'{}/results/test_{}/model_data.pkl'.format(dir_path,number))
agent_data.to_pickle(
'{}/results/test_{}/agent_data.pkl'.format(dir_path,number))
else:
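        # Interactive mode: launch the Mesa ModularServer with the grid plus one chart per collected metric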
# TODO: rename all of these
grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)
chart = ChartModule([{'Label': 'Utilisation', "Color": 'Black'}],data_collector_name='datacollector')
chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'}], data_collector_name='datacollector')
chart3 = ChartModule([{'Label': 'Average Order Wait Time','Color': 'Red'}], data_collector_name='datacollector')
chart4 = ChartModule([{'Label': 'Total Messages Sent','Color': 'Red'}], data_collector_name='datacollector')
averageMessagesSentChart = ChartModule([{'Label': 'Average Messages Sent','Color': 'Red'}], data_collector_name='datacollector')
chart5 = ChartModule([{'Label': 'Successful Orders','Color': 'Green'}], data_collector_name='datacollector')
chart6 = ChartModule([{'Label': 'Outsourced Orders','Color': 'Blue'}], data_collector_name='datacollector')
chart7 = ChartModule([{'Label': 'Late Orders','Color': 'Red'}], data_collector_name='datacollector')
chart8 = ChartModule([{'Label': 'WIP Backlog','Color': 'Blue'}], data_collector_name='datacollector')
chart9 = ChartModule([{'Label': 'Max Messages Sent - Order','Color': 'Blue'}], data_collector_name='datacollector')
chart10 = ChartModule([{'Label': 'Max Messages Received - Order','Color': 'Blue'}], data_collector_name='datacollector')
chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory','Color': 'Red'}], data_collector_name='datacollector')
chart12 = ChartModule([{'Label': 'Max Messages Received - Factory','Color': 'Red'}], data_collector_name='datacollector')
chart13 = ChartModule([{'Label': 'Average satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')
chart14 = ChartModule([{'Label': 'Average successful satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')
chart15 = ChartModule([{'Label': '% Cheap orders with cheap machines','Color': 'Green'}], data_collector_name='datacollector')
chart16 = ChartModule([{'Label': '% Asap orders with fast machines','Color': 'Green'}], data_collector_name='datacollector')
chart17 = ChartModule([{'Label': 'Average successful price','Color': 'Blue'}], data_collector_name='datacollector')
chart18 = ChartModule([{'Label': 'Average successful price Cheap','Color': 'Blue'}], data_collector_name='datacollector')
chart19 = ChartModule([{'Label': 'Average successful price Neutral','Color': 'Blue'}], data_collector_name='datacollector')
chart20 = ChartModule([{'Label': 'Average successful price Asap','Color': 'Blue'}], data_collector_name='datacollector')
chart21 = ChartModule([{'Label': 'Average successful makespan','Color': 'Green'}], data_collector_name='datacollector')
chart22 = ChartModule([{'Label': 'Average successful makespan Cheap','Color': 'Green'}], data_collector_name='datacollector')
chart23 = ChartModule([{'Label': 'Average successful makespan Neutral','Color': 'Green'}], data_collector_name='datacollector')
chart24 = ChartModule([{'Label': 'Average successful makespan Asap','Color': 'Green'}], data_collector_name='datacollector')
chart25 = ChartModule([{'Label': 'Successful Cheap Orders','Color': 'Red'}], data_collector_name='datacollector')
chart26 = ChartModule([{'Label': 'Successful Neutral Orders','Color': 'Red'}], data_collector_name='datacollector')
chart27 = ChartModule([{'Label': 'Successful Asap Orders','Color': 'Red'}], data_collector_name='datacollector')
noProposalOrdersChart = ChartModule([{'Label': 'Orders that received no proposals','Color': 'Red'}], data_collector_name='datacollector')
server = ModularServer(MASArchitecture,
[grid,
chart,
chart2,
chart3,
chart4,
averageMessagesSentChart,
chart5,
noProposalOrdersChart,
chart6,
chart7,
chart8, chart9, chart10,chart11, chart12,
chart13,chart14,
chart15,
chart16,chart17,
chart18, chart19, chart20,chart21,chart22,chart23,chart24,chart25,chart26,chart27
],
'Festo-Fetch.ai',
{'width': 60, 'height': 60, 'distributed':True,'quantity':10,'splitSize':1,'newOrderProbability':5,'verbose':True,'ordersPerWeek':40,
'model_reporters_dict': {
"Utilisation": metrics.machineUtilisation,
"Complete Orders": metrics.ordersComplete,
'Average Order Wait Time': metrics.averageOrderWaitTime,
"Successful Orders":metrics.successfulOrders,
'Total Messages Sent': metrics.totalMessagesSent,
'Average Messages Sent': metrics.averageMessagesSent,
'Late Orders':metrics.lateOrders,
'WIP Backlog':metrics.totalWIPSize,
'Max Messages Sent - Order': metrics.maxMessagesSentFromOrder,
'Max Messages Received - Order': metrics.maxMessagesReceivedByOrder,
'Max Messages Sent - Factory': metrics.maxMessagesSentFromFactory,
'Max Messages Received - Factory': metrics.maxMessagesReceivedByFactory,
'Outsourced Orders': metrics.outsourcedOrders,
'Orders that received no proposals':metrics.noProposalOrders,
'Average successful satisfaction score':metrics.averageSuccessfulSatisfactionScore,
'Average satisfaction score':metrics.averageSatisfactionScore,
'% Cheap orders with cheap machines':metrics.cheapOrdersWithCheapMachines,
'% Asap orders with fast machines':metrics.asapOrdersWithFastMachines,
'Average successful price': metrics.averageSuccessfulOrderPrice,
'Average successful price Cheap':metrics.averageSuccessfulOrderPriceCheap,
'Average successful price Neutral':metrics.averageSuccessfulOrderPriceNeutral,
'Average successful price Asap':metrics.averageSuccessfulOrderPriceAsap,
'Average successful makespan': metrics.averageSuccessfulOrderMakeSpan,
'Average successful makespan Cheap':metrics.averageSuccessfulOrderMakespanCheap,
'Average successful makespan Neutral':metrics.averageSuccessfulOrderMakespanNeutral,
'Average successful makespan Asap':metrics.averageSuccessfulOrderMakespanAsap,
'Successful Cheap Orders':metrics.percentageOfSuccessfulASAPOrders,
'Successful Neutral Orders':metrics.percentageOfSuccessfulCheapOrders,
'Successful Asap Orders':metrics.percentageOfSuccessfulNeutralOrders
}})
server.port = 8521
server.launch()
|
normal
|
{
"blob_id": "57b51ea36e9e2a095cf7e9646db2cc400cc72b83",
"index": 1082,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if runBatch:\n fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,\n 'distributed': True, 'verbose': False, 'searchSize': 1,\n 'batchRun': True}\n variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],\n 'ordersPerWeek': [1, 5, 20, 40, 80, 120]}\n batch_run = BatchRunner(MASArchitecture, variable_params,\n fixed_params, iterations=10, max_steps=800, model_reporters={\n 'Utilisation': metrics.machineUtilisation, 'CompleteOrders':\n metrics.ordersComplete, 'AverageOrderWaitTime': metrics.\n averageOrderWaitTime, 'TotalMessagesSent': metrics.\n totalMessagesSent, 'AverageMessagesSent': metrics.\n averageMessagesSent, 'SuccessfulOrders': metrics.\n successfulOrders, 'noProposalOrders': metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':\n metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,\n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,\n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,\n 'MaxMessagesReceivedFactory': metrics.\n maxMessagesReceivedByFactory, 'AverageSatisfactionScore':\n metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore': metrics.\n averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines': metrics.\n cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':\n metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':\n metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap': metrics.\n averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap': metrics.\n averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'AverageSuccessfulOrderMakespanCheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap': metrics.\n averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders': metrics.\n percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':\n metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={\n 'id': 'unique_id'})\n batch_run.run_all()\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n if saveResults:\n number = 0\n while os.path.exists('{}/results/test_{}'.format(dir_path, number)\n ) == True:\n number += 1\n os.makedirs('{}/results/test_{}'.format(dir_path, number))\n model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format\n (dir_path, number))\n agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format\n (dir_path, number))\n else:\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],\n data_collector_name='datacollector')\n chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'\n }], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':\n 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = ChartModule([{'Label':\n 'Average Messages Sent', 'Color': 'Red'}], 
data_collector_name=\n 'datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':\n 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':\n 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],\n data_collector_name='datacollector')\n chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart13 = ChartModule([{'Label': 'Average satisfaction score',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label':\n 'Average successful satisfaction score', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart15 = ChartModule([{'Label':\n '% Cheap orders with cheap machines', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart17 = ChartModule([{'Label': 'Average successful price',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average successful price Neutral',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart21 = ChartModule([{'Label': 'Average successful makespan',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label':\n 'Average successful makespan Cheap', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart23 = ChartModule([{'Label':\n 'Average successful makespan Neutral', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart24 = ChartModule([{'Label': 'Average successful makespan Asap',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = ChartModule([{'Label':\n 'Orders that received no proposals', 'Color': 'Red'}],\n data_collector_name='datacollector')\n server = ModularServer(MASArchitecture, [grid, chart, chart2,\n chart3, chart4, averageMessagesSentChart, chart5,\n noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,\n chart11, chart12, chart13, chart14, chart15, chart16, chart17,\n chart18, chart19, chart20, chart21, chart22, chart23, chart24,\n chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,\n 'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':\n 1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek': \n 40, 
'model_reporters_dict': {'Utilisation': metrics.\n machineUtilisation, 'Complete Orders': metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime,\n 'Successful Orders': metrics.successfulOrders,\n 'Total Messages Sent': metrics.totalMessagesSent,\n 'Average Messages Sent': metrics.averageMessagesSent,\n 'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.\n totalWIPSize, 'Max Messages Sent - Order': metrics.\n maxMessagesSentFromOrder, 'Max Messages Received - Order':\n metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.\n maxMessagesSentFromFactory, 'Max Messages Received - Factory':\n metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':\n metrics.outsourcedOrders, 'Orders that received no proposals':\n metrics.noProposalOrders,\n 'Average successful satisfaction score': metrics.\n averageSuccessfulSatisfactionScore,\n 'Average satisfaction score': metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines': metrics.\n cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines': metrics.\n asapOrdersWithFastMachines, 'Average successful price': metrics\n .averageSuccessfulOrderPrice, 'Average successful price Cheap':\n metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap': metrics.\n averageSuccessfulOrderPriceAsap, 'Average successful makespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'Average successful makespan Cheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap': metrics.\n averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders': metrics.\n percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':\n metrics.percentageOfSuccessfulNeutralOrders}})\n server.port = 8521\n server.launch()\n",
"step-3": "<mask token>\nrunBatch = True\narchitecture = 'Inter-Firm'\nsaveResults = True\nif __name__ == '__main__':\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if runBatch:\n fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,\n 'distributed': True, 'verbose': False, 'searchSize': 1,\n 'batchRun': True}\n variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],\n 'ordersPerWeek': [1, 5, 20, 40, 80, 120]}\n batch_run = BatchRunner(MASArchitecture, variable_params,\n fixed_params, iterations=10, max_steps=800, model_reporters={\n 'Utilisation': metrics.machineUtilisation, 'CompleteOrders':\n metrics.ordersComplete, 'AverageOrderWaitTime': metrics.\n averageOrderWaitTime, 'TotalMessagesSent': metrics.\n totalMessagesSent, 'AverageMessagesSent': metrics.\n averageMessagesSent, 'SuccessfulOrders': metrics.\n successfulOrders, 'noProposalOrders': metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':\n metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,\n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,\n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,\n 'MaxMessagesReceivedFactory': metrics.\n maxMessagesReceivedByFactory, 'AverageSatisfactionScore':\n metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore': metrics.\n averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines': metrics.\n cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':\n metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':\n metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap': metrics.\n averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap': metrics.\n averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'AverageSuccessfulOrderMakespanCheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap': metrics.\n averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders': metrics.\n percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':\n metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={\n 'id': 'unique_id'})\n batch_run.run_all()\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n if saveResults:\n number = 0\n while os.path.exists('{}/results/test_{}'.format(dir_path, number)\n ) == True:\n number += 1\n os.makedirs('{}/results/test_{}'.format(dir_path, number))\n model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format\n (dir_path, number))\n agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format\n (dir_path, number))\n else:\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],\n data_collector_name='datacollector')\n chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'\n }], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':\n 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = 
ChartModule([{'Label':\n 'Average Messages Sent', 'Color': 'Red'}], data_collector_name=\n 'datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':\n 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':\n 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],\n data_collector_name='datacollector')\n chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart13 = ChartModule([{'Label': 'Average satisfaction score',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label':\n 'Average successful satisfaction score', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart15 = ChartModule([{'Label':\n '% Cheap orders with cheap machines', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart17 = ChartModule([{'Label': 'Average successful price',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average successful price Neutral',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart21 = ChartModule([{'Label': 'Average successful makespan',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label':\n 'Average successful makespan Cheap', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart23 = ChartModule([{'Label':\n 'Average successful makespan Neutral', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart24 = ChartModule([{'Label': 'Average successful makespan Asap',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = ChartModule([{'Label':\n 'Orders that received no proposals', 'Color': 'Red'}],\n data_collector_name='datacollector')\n server = ModularServer(MASArchitecture, [grid, chart, chart2,\n chart3, chart4, averageMessagesSentChart, chart5,\n noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,\n chart11, chart12, chart13, chart14, chart15, chart16, chart17,\n chart18, chart19, chart20, chart21, chart22, chart23, chart24,\n chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,\n 'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':\n 1, 
'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek': \n 40, 'model_reporters_dict': {'Utilisation': metrics.\n machineUtilisation, 'Complete Orders': metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime,\n 'Successful Orders': metrics.successfulOrders,\n 'Total Messages Sent': metrics.totalMessagesSent,\n 'Average Messages Sent': metrics.averageMessagesSent,\n 'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.\n totalWIPSize, 'Max Messages Sent - Order': metrics.\n maxMessagesSentFromOrder, 'Max Messages Received - Order':\n metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.\n maxMessagesSentFromFactory, 'Max Messages Received - Factory':\n metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':\n metrics.outsourcedOrders, 'Orders that received no proposals':\n metrics.noProposalOrders,\n 'Average successful satisfaction score': metrics.\n averageSuccessfulSatisfactionScore,\n 'Average satisfaction score': metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines': metrics.\n cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines': metrics.\n asapOrdersWithFastMachines, 'Average successful price': metrics\n .averageSuccessfulOrderPrice, 'Average successful price Cheap':\n metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap': metrics.\n averageSuccessfulOrderPriceAsap, 'Average successful makespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'Average successful makespan Cheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap': metrics.\n averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders': metrics.\n percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':\n metrics.percentageOfSuccessfulNeutralOrders}})\n server.port = 8521\n server.launch()\n",
"step-4": "from mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.batchrunner import BatchRunner\nfrom agentPortrayal import agent_portrayal\nimport metrics\nfrom matplotlib import pyplot as plt\nfrom ArchitectureModel import MASArchitecture\nimport os\nimport random\nimport sys\nrunBatch = True\narchitecture = 'Inter-Firm'\nsaveResults = True\nif __name__ == '__main__':\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if runBatch:\n fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,\n 'distributed': True, 'verbose': False, 'searchSize': 1,\n 'batchRun': True}\n variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],\n 'ordersPerWeek': [1, 5, 20, 40, 80, 120]}\n batch_run = BatchRunner(MASArchitecture, variable_params,\n fixed_params, iterations=10, max_steps=800, model_reporters={\n 'Utilisation': metrics.machineUtilisation, 'CompleteOrders':\n metrics.ordersComplete, 'AverageOrderWaitTime': metrics.\n averageOrderWaitTime, 'TotalMessagesSent': metrics.\n totalMessagesSent, 'AverageMessagesSent': metrics.\n averageMessagesSent, 'SuccessfulOrders': metrics.\n successfulOrders, 'noProposalOrders': metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':\n metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,\n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,\n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,\n 'MaxMessagesReceivedFactory': metrics.\n maxMessagesReceivedByFactory, 'AverageSatisfactionScore':\n metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore': metrics.\n averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines': metrics.\n cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':\n metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':\n metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap': metrics.\n averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap': metrics.\n averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'AverageSuccessfulOrderMakespanCheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap': metrics.\n averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders': metrics.\n percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':\n metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={\n 'id': 'unique_id'})\n batch_run.run_all()\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n if saveResults:\n number = 0\n while os.path.exists('{}/results/test_{}'.format(dir_path, number)\n ) == True:\n number += 1\n os.makedirs('{}/results/test_{}'.format(dir_path, number))\n model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format\n (dir_path, number))\n agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format\n (dir_path, number))\n else:\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],\n data_collector_name='datacollector')\n chart2 = 
ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'\n }], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':\n 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = ChartModule([{'Label':\n 'Average Messages Sent', 'Color': 'Red'}], data_collector_name=\n 'datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':\n 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':\n 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],\n data_collector_name='datacollector')\n chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart13 = ChartModule([{'Label': 'Average satisfaction score',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label':\n 'Average successful satisfaction score', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart15 = ChartModule([{'Label':\n '% Cheap orders with cheap machines', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart17 = ChartModule([{'Label': 'Average successful price',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average successful price Neutral',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart21 = ChartModule([{'Label': 'Average successful makespan',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label':\n 'Average successful makespan Cheap', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart23 = ChartModule([{'Label':\n 'Average successful makespan Neutral', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart24 = ChartModule([{'Label': 'Average successful makespan Asap',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = ChartModule([{'Label':\n 'Orders that received no proposals', 'Color': 'Red'}],\n data_collector_name='datacollector')\n server = ModularServer(MASArchitecture, [grid, chart, chart2,\n chart3, chart4, 
averageMessagesSentChart, chart5,\n noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,\n chart11, chart12, chart13, chart14, chart15, chart16, chart17,\n chart18, chart19, chart20, chart21, chart22, chart23, chart24,\n chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,\n 'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':\n 1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek': \n 40, 'model_reporters_dict': {'Utilisation': metrics.\n machineUtilisation, 'Complete Orders': metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime,\n 'Successful Orders': metrics.successfulOrders,\n 'Total Messages Sent': metrics.totalMessagesSent,\n 'Average Messages Sent': metrics.averageMessagesSent,\n 'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.\n totalWIPSize, 'Max Messages Sent - Order': metrics.\n maxMessagesSentFromOrder, 'Max Messages Received - Order':\n metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.\n maxMessagesSentFromFactory, 'Max Messages Received - Factory':\n metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':\n metrics.outsourcedOrders, 'Orders that received no proposals':\n metrics.noProposalOrders,\n 'Average successful satisfaction score': metrics.\n averageSuccessfulSatisfactionScore,\n 'Average satisfaction score': metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines': metrics.\n cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines': metrics.\n asapOrdersWithFastMachines, 'Average successful price': metrics\n .averageSuccessfulOrderPrice, 'Average successful price Cheap':\n metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap': metrics.\n averageSuccessfulOrderPriceAsap, 'Average successful makespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'Average successful makespan Cheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap': metrics.\n averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders': metrics.\n percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':\n metrics.percentageOfSuccessfulNeutralOrders}})\n server.port = 8521\n server.launch()\n",
"step-5": "from mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.batchrunner import BatchRunner\nfrom agentPortrayal import agent_portrayal\nimport metrics\nfrom matplotlib import pyplot as plt\nfrom ArchitectureModel import MASArchitecture\nimport os\nimport random\nimport sys\n\nrunBatch = True\narchitecture = 'Inter-Firm'\nsaveResults = True\n\n\n\nif __name__ == '__main__':\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n \n\n if(runBatch):\n fixed_params = {'width': 60, 'height': 60,'splitSize':1,'distributed':True,'verbose':False,'searchSize':1,'batchRun':True}\n\n variable_params = {'quantity':[10,20,50,80,100,120,150],'ordersPerWeek':[1,5,20,40,80,120]}\n\n batch_run = BatchRunner(\n MASArchitecture,\n variable_params,\n fixed_params,\n iterations=10,\n max_steps=800,\n model_reporters={\n \"Utilisation\": metrics.machineUtilisation,\n \"CompleteOrders\": metrics.ordersComplete,\n 'AverageOrderWaitTime': metrics.averageOrderWaitTime,\n 'TotalMessagesSent': metrics.totalMessagesSent, \n 'AverageMessagesSent': metrics.averageMessagesSent, \n \"SuccessfulOrders\":metrics.successfulOrders,\n \"noProposalOrders\":metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders,\n 'LateOrders':metrics.lateOrders,\n 'WIPBacklog':metrics.totalWIPSize, \n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder, \n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory, \n 'MaxMessagesReceivedFactory': metrics.maxMessagesReceivedByFactory,\n \n 'AverageSatisfactionScore':metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore':metrics.averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines':metrics.cheapOrdersWithCheapMachines,\n 'AsapOrdersWithFastMachines':metrics.asapOrdersWithFastMachines,\n \n 'AverageSuccessfulPrice': metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap':metrics.averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral':metrics.averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap':metrics.averageSuccessfulOrderPriceAsap,\n \n 'AverageSuccessfulMakespan': metrics.averageSuccessfulOrderMakeSpan,\n 'AverageSuccessfulOrderMakespanCheap':metrics.averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral':metrics.averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap':metrics.averageSuccessfulOrderMakespanAsap,\n\n 'SuccessfulAsapOrders':metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders':metrics.percentageOfSuccessfulCheapOrders,\n 'SuccessfulNeutralOrders':metrics.percentageOfSuccessfulNeutralOrders\n },\n agent_reporters={\n 'id':'unique_id',\n # # TODO: add in other agent reports that you would like to use\n }\n )\n\n batch_run.run_all()\n\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n\n \n # Save results\n if(saveResults):\n number = 0\n ### CHANGE PATH TO WHERE YOU WANT RESULTS TO BE SAVED\n while (os.path.exists('{}/results/test_{}'.format(dir_path,number)) == True):\n number += 1\n\n # TODO: maybe make a text file that describes the test that has been run\n os.makedirs(\n '{}/results/test_{}'.format(dir_path,number))\n\n model_data.to_pickle(\n '{}/results/test_{}/model_data.pkl'.format(dir_path,number))\n agent_data.to_pickle(\n 
'{}/results/test_{}/agent_data.pkl'.format(dir_path,number))\n\n \n \n else:\n # TODO: rename all of these\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', \"Color\": 'Black'}],data_collector_name='datacollector')\n chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'}], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time','Color': 'Red'}], data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent','Color': 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = ChartModule([{'Label': 'Average Messages Sent','Color': 'Red'}], data_collector_name='datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders','Color': 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders','Color': 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders','Color': 'Red'}], data_collector_name='datacollector')\n chart8 = ChartModule([{'Label': 'WIP Backlog','Color': 'Blue'}], data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order','Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order','Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory','Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory','Color': 'Red'}], data_collector_name='datacollector')\n \n \n\n chart13 = ChartModule([{'Label': 'Average satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label': 'Average successful satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')\n chart15 = ChartModule([{'Label': '% Cheap orders with cheap machines','Color': 'Green'}], data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines','Color': 'Green'}], data_collector_name='datacollector')\n\n chart17 = ChartModule([{'Label': 'Average successful price','Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap','Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average successful price Neutral','Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap','Color': 'Blue'}], data_collector_name='datacollector')\n\n chart21 = ChartModule([{'Label': 'Average successful makespan','Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label': 'Average successful makespan Cheap','Color': 'Green'}], data_collector_name='datacollector')\n chart23 = ChartModule([{'Label': 'Average successful makespan Neutral','Color': 'Green'}], data_collector_name='datacollector')\n chart24 = ChartModule([{'Label': 'Average successful makespan Asap','Color': 'Green'}], data_collector_name='datacollector')\n\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders','Color': 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders','Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders','Color': 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = 
ChartModule([{'Label': 'Orders that received no proposals','Color': 'Red'}], data_collector_name='datacollector')\n\n\n\n \n\n \n server = ModularServer(MASArchitecture,\n [grid,\n chart,\n chart2,\n chart3,\n chart4,\n averageMessagesSentChart,\n chart5, \n noProposalOrdersChart,\n chart6,\n chart7, \n chart8, chart9, chart10,chart11, chart12,\n chart13,chart14,\n chart15,\n chart16,chart17,\n chart18, chart19, chart20,chart21,chart22,chart23,chart24,chart25,chart26,chart27\n ],\n 'Festo-Fetch.ai',\n\n {'width': 60, 'height': 60, 'distributed':True,'quantity':10,'splitSize':1,'newOrderProbability':5,'verbose':True,'ordersPerWeek':40,\n 'model_reporters_dict': {\n \"Utilisation\": metrics.machineUtilisation,\n \"Complete Orders\": metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime, \n \"Successful Orders\":metrics.successfulOrders,\n 'Total Messages Sent': metrics.totalMessagesSent, \n 'Average Messages Sent': metrics.averageMessagesSent, \n 'Late Orders':metrics.lateOrders,\n 'WIP Backlog':metrics.totalWIPSize, \n 'Max Messages Sent - Order': metrics.maxMessagesSentFromOrder, \n 'Max Messages Received - Order': metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.maxMessagesSentFromFactory, \n 'Max Messages Received - Factory': metrics.maxMessagesReceivedByFactory,\n 'Outsourced Orders': metrics.outsourcedOrders,\n 'Orders that received no proposals':metrics.noProposalOrders,\n \n 'Average successful satisfaction score':metrics.averageSuccessfulSatisfactionScore,\n 'Average satisfaction score':metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines':metrics.cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines':metrics.asapOrdersWithFastMachines,\n\n 'Average successful price': metrics.averageSuccessfulOrderPrice,\n\n 'Average successful price Cheap':metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral':metrics.averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap':metrics.averageSuccessfulOrderPriceAsap,\n \n 'Average successful makespan': metrics.averageSuccessfulOrderMakeSpan,\n\n 'Average successful makespan Cheap':metrics.averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral':metrics.averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap':metrics.averageSuccessfulOrderMakespanAsap,\n \n 'Successful Cheap Orders':metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders':metrics.percentageOfSuccessfulCheapOrders,\n 'Successful Asap Orders':metrics.percentageOfSuccessfulNeutralOrders\n\n }})\n\n server.port = 8521\n server.launch()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tensorflow as tf
import numpy as np
import time
import os
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
from src.model import get_args
from src.funcs import linear
from src.youtubeface import load_ytf_data
from src.lfw import load_lfw_data
from src.facescrub import load_fs_data
from src.wrapper_basicImg import wrapper_basicImg
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
total_iteration = 300000
m = 512
q = 32
lam = 0.01
beta = 1.
margin = 0.5
s = 32
batch_size = 256
class_num = 1595
train_dataset = 'FS'
eval_dataset = "LFW"
args = get_args()
### Get image and label from tfrecord
image, label, iterator = {}, {}, {}
if train_dataset == 'YTF':
image['train'], label['train'], iterator['train'] = load_ytf_data(batch_size, 'train')
elif train_dataset == 'FS':
image['train'], label['train'], iterator['train'] = load_fs_data(batch_size, 'train')
else:
print("Select proper dataset")
### Get evaluation dataset. Wrapper
wrapper = wrapper_basicImg(dataset=eval_dataset)
if eval_dataset == 'YTF':
image['gallery'], label['gallery'], iterator['gallery'] = load_ytf_data(batch_size, 'train', eval=True)
image['test'], label['test'], iterator['test'] = load_ytf_data(batch_size, 'test')
elif eval_dataset == 'LFW':
image['gallery'], label['gallery'], iterator['gallery'] = load_lfw_data(batch_size, 'gallery')
image['test'], label['test'], iterator['test'] = load_lfw_data(batch_size, 'probe')
### Backbone network (Arcface)
embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512], dtype=tf.float32)
labels = tf.placeholder(name='label', shape=[None, ], dtype=tf.int32)
### Global step & learning rate
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.003
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, total_iteration, 0.96)
### My implementation (DIom algorithm)
with tf.variable_scope('DIom'):
fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')
fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')
fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')
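        # Soft quantization: a softmax over q bins per output dimension, followed by the expected bin index, gives a near-integer code value in [1, q]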
h_k = tf.reshape(fc3, [-1, m, q])
h_k = tf.nn.softmax(beta * h_k, axis=2)
index_matrix = tf.range(1, q + 1, dtype=tf.float32)
h = tf.reduce_sum(h_k * index_matrix, axis=2)
h = tf.reshape(h, [-1, m])
h_norm = tf.math.l2_normalize(h, axis=1)
### Loss function
l = tf.one_hot(labels, class_num)
l = tf.matmul(l, tf.transpose(l))
l_float = tf.cast(l, tf.float32)
l = tf.reshape(tf.clip_by_value(l_float, 0., 1.), (-1, 1))
label_int = tf.cast(tf.squeeze(l, 1), tf.int32)
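    # Flattened all-pairs cosine similarities between the normalized codes, aligned with the pairwise same/different labels built above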
inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))
cos_t = tf.clip_by_value(inner_prod, -1., 1. - 1e-6)
theta = tf.math.acos(cos_t)
sin_t = tf.math.sin(theta)
cos_mt = tf.math.cos(theta + margin)
sin_mt = tf.math.sin(theta + margin)
logit = l * s * (tf.concat([sin_t, cos_mt], 1)) + (1 - l) * s * (tf.concat([sin_mt, cos_t], 1))
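    # Two-class (genuine/impostor) logits with an additive angular margin, scaled by s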
l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=label_int)
c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)
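    # Code-balance penalty: pulls each dimension's mean code value toward the middle of the index range, (q + 1) / 2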
# Baseline pairwise-CE
# label_ce = tf.cast(labels, tf.float32)
# l_ij = l * tf.log(tf.square(inner_prod)) + (1 - l) * tf.log(tf.maximum(1e-6, 1 - tf.square(inner_prod)))
# l_ij = -tf.reduce_mean(l_ij)
# My novel cosine loss
l_ij = tf.reduce_mean(l_ij_logit)
c_ij = tf.reduce_mean(c_ij)
loss = l_ij + lam * c_ij
gradient = tf.gradients(loss, sin_t)
### Optimizer
t_vars = tf.global_variables()
train_vars = [var for var in t_vars if 'DIom' in var.name]
opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, var_list=train_vars, global_step=global_step)
with tf.Session() as sess:
tf.global_variables_initializer().run()
sess.run(iterator['train'].initializer)
### Training
iteration = sess.run(global_step)
t_opt = [opt_t, loss, l_ij, c_ij]
start_time = time.time()
while iteration != total_iteration:
img, lbl = sess.run([image['train'], label['train']])
train_dict = {
embedding_tensor: img,
labels: lbl
}
_, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=train_dict)
iteration += 1
if iteration % 10000 == 0:
### Evaluation after training
### Get gallery hash code
# gallery = []
# gallery_label = []
# sess.run(iterator['gallery'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['gallery'], label['gallery']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# hash_code = sess.run(h_norm, feed_dict=gallery_dict)
#
# if gallery == []:
# gallery = hash_code
# gallery_label = lbl
# else:
# gallery = np.concatenate((gallery, hash_code), axis=0)
# gallery_label = np.concatenate((gallery_label, lbl), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Get probe hash code
# probe = []
# probe_label = []
# code_arr = []
# sess.run(iterator['test'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['test'], label['test']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# code, hash_code = sess.run([h, h_norm], feed_dict=gallery_dict)
#
# if probe == []:
# probe = hash_code
# probe_label = lbl
# code_arr = code
# else:
# probe = np.concatenate((probe, hash_code), axis=0)
# probe_label = np.concatenate((probe_label, lbl), axis=0)
# code_arr = np.concatenate((code_arr, code), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Code frequency
# code_arr = np.around(code_arr)
# count_arr = []
# for i in range(q):
# count_arr.append(np.count_nonzero(code_arr == i + 1))
#
# plt.clf()
# plt.bar(range(1, q+1), count_arr)
# plt.savefig('./plt/code_' + str(iteration) + '.png')
# ### Calculate MAP
# gtp = 40
# k = 50
#
# distance = np.matmul(probe, gallery.T)
# arg_idx = np.argsort(-distance, axis=1)
#
# max_label = gallery_label[arg_idx[:, :k]]
# match_matrix = np.equal(max_label, probe_label[:,np.newaxis])
#
# tp_seen = match_matrix * np.cumsum(match_matrix, axis=1)
# ap = np.sum(tp_seen / np.arange(1, k + 1)[np.newaxis, :], axis=1) / gtp
# MAP = np.mean(ap)
### Calculate EER
dist_list = []
label_list = []
code_list = []
while wrapper.samples_left > 0:
imgs, lbls = wrapper.get_next_batch(100)
imgs = np.reshape(imgs, [-1, 512])
eer_dict = {
embedding_tensor: imgs
}
code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)
code = np.reshape(code, [-1, 2, m])
distance = np.sum(np.prod(code, axis=1), axis=1)
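                    # Each evaluation sample is an image pair; the dot product of its two L2-normalized codes is the pair's match score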
if dist_list == []:
dist_list = distance
label_list = lbls
code_list = int_code
else:
dist_list = np.concatenate((dist_list, distance), axis=0)
label_list = np.concatenate((label_list, lbls), axis=0)
code_list = np.concatenate((code_list, int_code), axis=0)
                wrapper.samples_left = np.size(wrapper.labels, axis=0)
wrapper.next_batch_pointer = 0
fpr, tpr, threshold = roc_curve(label_list, dist_list, pos_label=1)
fnr = 1 - tpr
# eer_threshold = threshold(np.nanargmin(np.absolute((fnr - fpr))))
eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
### Code frequency
code_arr = np.around(code_list)
count_arr = []
for i in range(q):
count_arr.append(np.count_nonzero(code_arr == i + 1))
plt.clf()
plt.bar(range(1, q + 1), count_arr)
plt.savefig('./plt/code_' + str(iteration) + '.png')
time_taken = time.time() - start_time
MAP = 0
# print("good")
print("[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f"
% (iteration, train_loss, loss_l, loss_c, MAP, eer, time_taken))
start_time = time.time()
# np.save('CP.npy', np.concatenate((fpr[np.newaxis, :], tpr[np.newaxis, :]), axis=0))
### Save model.
# save_vars = [var for var in t_vars if 'DIom' in var.name]
# saver = tf.train.Saver(var_list=save_vars)
# saver.save(sess, './model/DIom_layer')
|
normal
|
{
"blob_id": "459dd9302f7100ad02119cc94b735b19287f21e5",
"index": 5956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n total_iteration = 300000\n m = 512\n q = 32\n lam = 0.01\n beta = 1.0\n margin = 0.5\n s = 32\n batch_size = 256\n class_num = 1595\n train_dataset = 'FS'\n eval_dataset = 'LFW'\n args = get_args()\n image, label, iterator = {}, {}, {}\n if train_dataset == 'YTF':\n image['train'], label['train'], iterator['train'] = load_ytf_data(\n batch_size, 'train')\n elif train_dataset == 'FS':\n image['train'], label['train'], iterator['train'] = load_fs_data(\n batch_size, 'train')\n else:\n print('Select proper dataset')\n wrapper = wrapper_basicImg(dataset=eval_dataset)\n if eval_dataset == 'YTF':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_ytf_data(batch_size, 'train', eval=True)\n image['test'], label['test'], iterator['test'] = load_ytf_data(\n batch_size, 'test')\n elif eval_dataset == 'LFW':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_lfw_data(batch_size, 'gallery')\n image['test'], label['test'], iterator['test'] = load_lfw_data(\n batch_size, 'probe')\n embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512],\n dtype=tf.float32)\n labels = tf.placeholder(name='label', shape=[None], dtype=tf.int32)\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.003\n learning_rate = tf.train.exponential_decay(starter_learning_rate,\n global_step, total_iteration, 0.96)\n with tf.variable_scope('DIom'):\n fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')\n fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')\n fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')\n h_k = tf.reshape(fc3, [-1, m, q])\n h_k = tf.nn.softmax(beta * h_k, axis=2)\n index_matrix = tf.range(1, q + 1, dtype=tf.float32)\n h = tf.reduce_sum(h_k * index_matrix, axis=2)\n h = tf.reshape(h, [-1, m])\n h_norm = tf.math.l2_normalize(h, axis=1)\n l = tf.one_hot(labels, class_num)\n l = tf.matmul(l, tf.transpose(l))\n l_float = tf.cast(l, tf.float32)\n l = tf.reshape(tf.clip_by_value(l_float, 0.0, 1.0), (-1, 1))\n label_int = tf.cast(tf.squeeze(l, 1), tf.int32)\n inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))\n cos_t = tf.clip_by_value(inner_prod, -1.0, 1.0 - 1e-06)\n theta = tf.math.acos(cos_t)\n sin_t = tf.math.sin(theta)\n cos_mt = tf.math.cos(theta + margin)\n sin_mt = tf.math.sin(theta + margin)\n logit = l * s * tf.concat([sin_t, cos_mt], 1) + (1 - l) * s * tf.concat([\n sin_mt, cos_t], 1)\n l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=\n logit, labels=label_int)\n c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)\n l_ij = tf.reduce_mean(l_ij_logit)\n c_ij = tf.reduce_mean(c_ij)\n loss = l_ij + lam * c_ij\n gradient = tf.gradients(loss, sin_t)\n t_vars = tf.global_variables()\n train_vars = [var for var in t_vars if 'DIom' in var.name]\n opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(\n loss, var_list=train_vars, global_step=global_step)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n sess.run(iterator['train'].initializer)\n iteration = sess.run(global_step)\n t_opt = [opt_t, loss, l_ij, c_ij]\n start_time = time.time()\n while iteration != total_iteration:\n img, lbl = sess.run([image['train'], label['train']])\n train_dict = {embedding_tensor: img, labels: lbl}\n _, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=\n train_dict)\n iteration += 1\n if iteration % 10000 == 0:\n dist_list = []\n label_list = []\n code_list = []\n while wrapper.samples_left 
> 0:\n imgs, lbls = wrapper.get_next_batch(100)\n imgs = np.reshape(imgs, [-1, 512])\n eer_dict = {embedding_tensor: imgs}\n code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)\n code = np.reshape(code, [-1, 2, m])\n distance = np.sum(np.prod(code, axis=1), axis=1)\n if dist_list == []:\n dist_list = distance\n label_list = lbls\n code_list = int_code\n else:\n dist_list = np.concatenate((dist_list, distance),\n axis=0)\n label_list = np.concatenate((label_list, lbls), axis=0)\n code_list = np.concatenate((code_list, int_code),\n axis=0)\n wrapper.samples_left = np.size(wrapper.labels, axis=0)\n wrapper.next_batch_pointer = 0\n fpr, tpr, threshold = roc_curve(label_list, dist_list,\n pos_label=1)\n fnr = 1 - tpr\n eer = fpr[np.nanargmin(np.absolute(fnr - fpr))]\n code_arr = np.around(code_list)\n count_arr = []\n for i in range(q):\n count_arr.append(np.count_nonzero(code_arr == i + 1))\n plt.clf()\n plt.bar(range(1, q + 1), count_arr)\n plt.savefig('./plt/code_' + str(iteration) + '.png')\n time_taken = time.time() - start_time\n MAP = 0\n print(\n '[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f'\n % (iteration, train_loss, loss_l, loss_c, MAP, eer,\n time_taken))\n start_time = time.time()\n",
"step-3": "import tensorflow as tf\nimport numpy as np\nimport time\nimport os\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\nfrom src.model import get_args\nfrom src.funcs import linear\nfrom src.youtubeface import load_ytf_data\nfrom src.lfw import load_lfw_data\nfrom src.facescrub import load_fs_data\nfrom src.wrapper_basicImg import wrapper_basicImg\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n total_iteration = 300000\n m = 512\n q = 32\n lam = 0.01\n beta = 1.0\n margin = 0.5\n s = 32\n batch_size = 256\n class_num = 1595\n train_dataset = 'FS'\n eval_dataset = 'LFW'\n args = get_args()\n image, label, iterator = {}, {}, {}\n if train_dataset == 'YTF':\n image['train'], label['train'], iterator['train'] = load_ytf_data(\n batch_size, 'train')\n elif train_dataset == 'FS':\n image['train'], label['train'], iterator['train'] = load_fs_data(\n batch_size, 'train')\n else:\n print('Select proper dataset')\n wrapper = wrapper_basicImg(dataset=eval_dataset)\n if eval_dataset == 'YTF':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_ytf_data(batch_size, 'train', eval=True)\n image['test'], label['test'], iterator['test'] = load_ytf_data(\n batch_size, 'test')\n elif eval_dataset == 'LFW':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_lfw_data(batch_size, 'gallery')\n image['test'], label['test'], iterator['test'] = load_lfw_data(\n batch_size, 'probe')\n embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512],\n dtype=tf.float32)\n labels = tf.placeholder(name='label', shape=[None], dtype=tf.int32)\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.003\n learning_rate = tf.train.exponential_decay(starter_learning_rate,\n global_step, total_iteration, 0.96)\n with tf.variable_scope('DIom'):\n fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')\n fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')\n fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')\n h_k = tf.reshape(fc3, [-1, m, q])\n h_k = tf.nn.softmax(beta * h_k, axis=2)\n index_matrix = tf.range(1, q + 1, dtype=tf.float32)\n h = tf.reduce_sum(h_k * index_matrix, axis=2)\n h = tf.reshape(h, [-1, m])\n h_norm = tf.math.l2_normalize(h, axis=1)\n l = tf.one_hot(labels, class_num)\n l = tf.matmul(l, tf.transpose(l))\n l_float = tf.cast(l, tf.float32)\n l = tf.reshape(tf.clip_by_value(l_float, 0.0, 1.0), (-1, 1))\n label_int = tf.cast(tf.squeeze(l, 1), tf.int32)\n inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))\n cos_t = tf.clip_by_value(inner_prod, -1.0, 1.0 - 1e-06)\n theta = tf.math.acos(cos_t)\n sin_t = tf.math.sin(theta)\n cos_mt = tf.math.cos(theta + margin)\n sin_mt = tf.math.sin(theta + margin)\n logit = l * s * tf.concat([sin_t, cos_mt], 1) + (1 - l) * s * tf.concat([\n sin_mt, cos_t], 1)\n l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=\n logit, labels=label_int)\n c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)\n l_ij = tf.reduce_mean(l_ij_logit)\n c_ij = tf.reduce_mean(c_ij)\n loss = l_ij + lam * c_ij\n gradient = tf.gradients(loss, sin_t)\n t_vars = tf.global_variables()\n train_vars = [var for var in t_vars if 'DIom' in var.name]\n opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(\n loss, var_list=train_vars, global_step=global_step)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n sess.run(iterator['train'].initializer)\n iteration = sess.run(global_step)\n t_opt = [opt_t, loss, l_ij, c_ij]\n start_time = 
time.time()\n while iteration != total_iteration:\n img, lbl = sess.run([image['train'], label['train']])\n train_dict = {embedding_tensor: img, labels: lbl}\n _, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=\n train_dict)\n iteration += 1\n if iteration % 10000 == 0:\n dist_list = []\n label_list = []\n code_list = []\n while wrapper.samples_left > 0:\n imgs, lbls = wrapper.get_next_batch(100)\n imgs = np.reshape(imgs, [-1, 512])\n eer_dict = {embedding_tensor: imgs}\n code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)\n code = np.reshape(code, [-1, 2, m])\n distance = np.sum(np.prod(code, axis=1), axis=1)\n if dist_list == []:\n dist_list = distance\n label_list = lbls\n code_list = int_code\n else:\n dist_list = np.concatenate((dist_list, distance),\n axis=0)\n label_list = np.concatenate((label_list, lbls), axis=0)\n code_list = np.concatenate((code_list, int_code),\n axis=0)\n wrapper.samples_left = np.size(wrapper.labels, axis=0)\n wrapper.next_batch_pointer = 0\n fpr, tpr, threshold = roc_curve(label_list, dist_list,\n pos_label=1)\n fnr = 1 - tpr\n eer = fpr[np.nanargmin(np.absolute(fnr - fpr))]\n code_arr = np.around(code_list)\n count_arr = []\n for i in range(q):\n count_arr.append(np.count_nonzero(code_arr == i + 1))\n plt.clf()\n plt.bar(range(1, q + 1), count_arr)\n plt.savefig('./plt/code_' + str(iteration) + '.png')\n time_taken = time.time() - start_time\n MAP = 0\n print(\n '[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f'\n % (iteration, train_loss, loss_l, loss_c, MAP, eer,\n time_taken))\n start_time = time.time()\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport time\nimport os\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\n\n\nfrom src.model import get_args\nfrom src.funcs import linear\nfrom src.youtubeface import load_ytf_data\nfrom src.lfw import load_lfw_data\nfrom src.facescrub import load_fs_data\nfrom src.wrapper_basicImg import wrapper_basicImg\n\n\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n total_iteration = 300000\n m = 512\n q = 32\n lam = 0.01\n beta = 1.\n margin = 0.5\n s = 32\n batch_size = 256\n class_num = 1595\n train_dataset = 'FS'\n eval_dataset = \"LFW\"\n args = get_args()\n\n ### Get image and label from tfrecord\n image, label, iterator = {}, {}, {}\n if train_dataset == 'YTF':\n image['train'], label['train'], iterator['train'] = load_ytf_data(batch_size, 'train')\n\n elif train_dataset == 'FS':\n image['train'], label['train'], iterator['train'] = load_fs_data(batch_size, 'train')\n\n else:\n print(\"Select proper dataset\")\n\n ### Get evaluation dataset. Wrapper\n wrapper = wrapper_basicImg(dataset=eval_dataset)\n if eval_dataset == 'YTF':\n image['gallery'], label['gallery'], iterator['gallery'] = load_ytf_data(batch_size, 'train', eval=True)\n image['test'], label['test'], iterator['test'] = load_ytf_data(batch_size, 'test')\n\n elif eval_dataset == 'LFW':\n image['gallery'], label['gallery'], iterator['gallery'] = load_lfw_data(batch_size, 'gallery')\n image['test'], label['test'], iterator['test'] = load_lfw_data(batch_size, 'probe')\n\n\n ### Backbone network (Arcface)\n embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512], dtype=tf.float32)\n labels = tf.placeholder(name='label', shape=[None, ], dtype=tf.int32)\n\n ### Global step & learning rate\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.003\n learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, total_iteration, 0.96)\n\n ### My implementation (DIom algorithm)\n with tf.variable_scope('DIom'):\n fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')\n fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')\n fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')\n\n h_k = tf.reshape(fc3, [-1, m, q])\n h_k = tf.nn.softmax(beta * h_k, axis=2)\n\n index_matrix = tf.range(1, q + 1, dtype=tf.float32)\n h = tf.reduce_sum(h_k * index_matrix, axis=2)\n h = tf.reshape(h, [-1, m])\n h_norm = tf.math.l2_normalize(h, axis=1)\n\n ### Loss function\n l = tf.one_hot(labels, class_num)\n l = tf.matmul(l, tf.transpose(l))\n l_float = tf.cast(l, tf.float32)\n l = tf.reshape(tf.clip_by_value(l_float, 0., 1.), (-1, 1))\n label_int = tf.cast(tf.squeeze(l, 1), tf.int32)\n\n inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))\n cos_t = tf.clip_by_value(inner_prod, -1., 1. 
- 1e-6)\n theta = tf.math.acos(cos_t)\n\n sin_t = tf.math.sin(theta)\n cos_mt = tf.math.cos(theta + margin)\n sin_mt = tf.math.sin(theta + margin)\n\n logit = l * s * (tf.concat([sin_t, cos_mt], 1)) + (1 - l) * s * (tf.concat([sin_mt, cos_t], 1))\n\n l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=label_int)\n c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)\n\n # Baseline pairwise-CE\n # label_ce = tf.cast(labels, tf.float32)\n # l_ij = l * tf.log(tf.square(inner_prod)) + (1 - l) * tf.log(tf.maximum(1e-6, 1 - tf.square(inner_prod)))\n # l_ij = -tf.reduce_mean(l_ij)\n\n # My novel cosine loss\n l_ij = tf.reduce_mean(l_ij_logit)\n c_ij = tf.reduce_mean(c_ij)\n\n loss = l_ij + lam * c_ij\n\n gradient = tf.gradients(loss, sin_t)\n\n ### Optimizer\n t_vars = tf.global_variables()\n train_vars = [var for var in t_vars if 'DIom' in var.name]\n\n\n opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, var_list=train_vars, global_step=global_step)\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n sess.run(iterator['train'].initializer)\n\n ### Training\n iteration = sess.run(global_step)\n t_opt = [opt_t, loss, l_ij, c_ij]\n start_time = time.time()\n while iteration != total_iteration:\n img, lbl = sess.run([image['train'], label['train']])\n\n train_dict = {\n embedding_tensor: img,\n labels: lbl\n }\n\n _, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=train_dict)\n iteration += 1\n\n if iteration % 10000 == 0:\n ### Evaluation after training\n ### Get gallery hash code\n # gallery = []\n # gallery_label = []\n # sess.run(iterator['gallery'].initializer)\n # try:\n # while True:\n # img, lbl = sess.run([image['gallery'], label['gallery']])\n #\n # gallery_dict = {\n # embedding_tensor: img\n # }\n #\n # hash_code = sess.run(h_norm, feed_dict=gallery_dict)\n #\n # if gallery == []:\n # gallery = hash_code\n # gallery_label = lbl\n # else:\n # gallery = np.concatenate((gallery, hash_code), axis=0)\n # gallery_label = np.concatenate((gallery_label, lbl), axis=0)\n #\n # except tf.errors.OutOfRangeError:\n # pass\n #\n # ### Get probe hash code\n # probe = []\n # probe_label = []\n # code_arr = []\n # sess.run(iterator['test'].initializer)\n # try:\n # while True:\n # img, lbl = sess.run([image['test'], label['test']])\n #\n # gallery_dict = {\n # embedding_tensor: img\n # }\n #\n # code, hash_code = sess.run([h, h_norm], feed_dict=gallery_dict)\n #\n # if probe == []:\n # probe = hash_code\n # probe_label = lbl\n # code_arr = code\n # else:\n # probe = np.concatenate((probe, hash_code), axis=0)\n # probe_label = np.concatenate((probe_label, lbl), axis=0)\n # code_arr = np.concatenate((code_arr, code), axis=0)\n #\n # except tf.errors.OutOfRangeError:\n # pass\n #\n # ### Code frequency\n # code_arr = np.around(code_arr)\n # count_arr = []\n # for i in range(q):\n # count_arr.append(np.count_nonzero(code_arr == i + 1))\n #\n # plt.clf()\n # plt.bar(range(1, q+1), count_arr)\n # plt.savefig('./plt/code_' + str(iteration) + '.png')\n\n # ### Calculate MAP\n # gtp = 40\n # k = 50\n #\n # distance = np.matmul(probe, gallery.T)\n # arg_idx = np.argsort(-distance, axis=1)\n #\n # max_label = gallery_label[arg_idx[:, :k]]\n # match_matrix = np.equal(max_label, probe_label[:,np.newaxis])\n #\n # tp_seen = match_matrix * np.cumsum(match_matrix, axis=1)\n # ap = np.sum(tp_seen / np.arange(1, k + 1)[np.newaxis, :], axis=1) / gtp\n # MAP = np.mean(ap)\n\n ### Calculate EER\n dist_list = []\n label_list = []\n 
code_list = []\n while wrapper.samples_left > 0:\n imgs, lbls = wrapper.get_next_batch(100)\n\n imgs = np.reshape(imgs, [-1, 512])\n\n eer_dict = {\n embedding_tensor: imgs\n }\n\n code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)\n code = np.reshape(code, [-1, 2, m])\n\n distance = np.sum(np.prod(code, axis=1), axis=1)\n\n if dist_list == []:\n dist_list = distance\n label_list = lbls\n code_list = int_code\n\n else:\n dist_list = np.concatenate((dist_list, distance), axis=0)\n label_list = np.concatenate((label_list, lbls), axis=0)\n code_list = np.concatenate((code_list, int_code), axis=0)\n\n wrapper.samples_left= np.size(wrapper.labels, axis=0)\n wrapper.next_batch_pointer = 0\n\n fpr, tpr, threshold = roc_curve(label_list, dist_list, pos_label=1)\n fnr = 1 - tpr\n # eer_threshold = threshold(np.nanargmin(np.absolute((fnr - fpr))))\n eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]\n\n ### Code frequency\n code_arr = np.around(code_list)\n count_arr = []\n for i in range(q):\n count_arr.append(np.count_nonzero(code_arr == i + 1))\n\n plt.clf()\n plt.bar(range(1, q + 1), count_arr)\n plt.savefig('./plt/code_' + str(iteration) + '.png')\n\n time_taken = time.time() - start_time\n MAP = 0\n # print(\"good\")\n print(\"[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f\"\n % (iteration, train_loss, loss_l, loss_c, MAP, eer, time_taken))\n\n start_time = time.time()\n\n # np.save('CP.npy', np.concatenate((fpr[np.newaxis, :], tpr[np.newaxis, :]), axis=0))\n ### Save model.\n # save_vars = [var for var in t_vars if 'DIom' in var.name]\n # saver = tf.train.Saver(var_list=save_vars)\n # saver.save(sess, './model/DIom_layer')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Assessment problem 22
# Two integers are read from standard input (the first value is in the range 1 to 20, the second in the range 10 to 30, and the first value is always smaller than the second).
# Write a program that prints a list of powers of 2 whose exponents run from the first integer through the second integer
# (the input() call must not print a prompt string). Before printing, delete the second element and the second-to-last element of the list. The output must be a list.
start, stop = list(map(int, input().split()))
# sample input: 1 10
i = 0
my = [2 ** i for i in range(start, stop+1)]
my.pop(1)
my.pop(-2)
print(my)
# Assessment problem 23
col, row = list(map(int, input().split()))
matrix = []
for i in range(row):
matrix.append(list(input()))
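# matrix now holds `row` lists of single characters, one list per input line (presumably `col` characters each)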
|
normal
|
{
"blob_id": "1f8040776a55d6fe52b64c714d4003469460e454",
"index": 7186,
"step-1": "# 심사문제 22\n# 표준 입력으로 정수 두 개가 입력됩니다(첫 번째 입력 값의 범위는 1~20, 두 번째 입력 값의 범위는 10~30이며 첫 번째 입력 값은 두 번째 입력 값보다 항상 작습니다).\n# 첫 번째 정수부터 두 번째 정수까지를 지수로 하는 2의 거듭제곱 리스트를 출력하는 프로그램을 만드세요\n# (input에서 안내 문자열은 출력하지 않아야 합니다). 단, 리스트의 두 번째 요소와 뒤에서 두 번째 요소는 삭제한 뒤 출력하세요. 출력 결과는 리스트 형태라야 합니다.\n\nstart, stop = list(map(int, input().split()))\n1 10\ni = 0\nmy = [2 ** i for i in range(start, stop+1)]\nmy.pop(1)\nmy.pop(-2)\nprint(my)\n\n# 심사문제 23\ncol, row = list(map(int, input().split()))\n\nmatrix = []\nfor i in range(row):\n matrix.append(list(input()))\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |