#! /usr/bin/python2
# Copyright 2007 John Kasunich and Jeff Epler
#
# modified by Rudy du Preez to fit with the kinematics component pumakins.c
# Note: DH parameters in pumakins halfile should be set to
# A2=400, A3=50, D3=100, D4=400, D6=95
#
# z |
# |
# |__________y top of the base.
# /
# / A2
# x /
# /_______
# D3 /
# / A3
# |
# |
# | D4
# |___
# |
# tooltip | D6
#
# or they should be changed below to fit. Otherwise you won't get straight lines
# when moving x, y, or z in world mode. If all is correct the tool should rotate
# about its tip with no x,y,z movement for changes in A,B,C at any point in the
# workspace.
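#
# For reference, those DH parameters correspond to the transforms used in the
# code below:
#   D6=95          -> Translate([link6], 0, 0, 95)    (gripper onto wrist)
#   A3=50, D4=400  -> Translate([link4], 0, 50, 400)  (wrist onto forearm)
#   D3=100         -> Translate([link3], 100, 0, 0)   (forearm offset)
#   A2=400         -> Translate([link3], 0, 0, 400)   (forearm onto upper arm)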
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from vismach import *
import hal
c = hal.component("pumagui")
c.newpin("joint1", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint2", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint3", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint4", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint5", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint6", hal.HAL_FLOAT, hal.HAL_IN)
c.ready()
###################
# tool or finger
finger1 = CylinderZ(0, 5, 50, 5)
# "tooltip" for backplot will be the tip of the finger
tooltip = Capture()
# "hand" - the part the finger is attached to
link6 = Collection([
finger1,
Box(-25, -25, -10, 25, 25, 0)])
link6 = Translate([link6],0,0,-50)
link6 = Collection([tooltip,link6])
# assemble the fingers, and make them rotate
link6 = HalRotate([link6],c,"joint6",1,0,0,1)
# moving part of wrist joint
link5 = Collection([
CylinderZ( 27, 30, 35, 30),
CylinderX(-13, 25, 13, 25),
Box(-11, -25, 0, 11, 25, 27)])
# move gripper to end of wrist and attach D6=95
link5 = Collection([
link5,
Translate([link6],0,0,95)])
# make wrist bend
link5 = HalRotate([link5],c,"joint5",1,1,0,0)
# fixed part of wrist joint (rotates on end of arm)
link4 = Collection([
CylinderX(-13, 22, -27, 22),
CylinderX( 13, 22, 27, 22),
Box(-15, -22, -30, -25, 22, 0),
Box( 15, -22, -30, 25, 22, 0),
Box(-25, -25, -45, 25, 25, -30)])
# attach wrist, move whole assembly forward so joint 4 is at origin
link4 = Translate([link4,link5], 0, 0, 0)
# make joint 4 rotate
link4 = HalRotate([link4],c,"joint4",1,0,0,1)
# next chunk link length is D4=400
link3 = Collection([
CylinderY(-50,35,25,35),
CylinderZ(0.0, 35, 400-45, 25)])
link3 = Translate([link3],0,50,0)
link3 = Collection([
link3,
CylinderX(-50,40,40,40)])
# move link4 forward and sideways (A3=50) and attach
link3 = Collection([
link3,
Translate([link4],0.0, 50, 400)])
# move whole assembly over so joint 3 is at origin (D3=100)
link3 = Translate([link3],100, 0, 0.0)
# rotate to J3 zero position
link3 = Rotate([link3],90,1,0,0)
# make joint 3 rotate
link3 = HalRotate([link3],c,"joint3",1,1,0,0)
# elbow stuff
link2 = CylinderX(-50,50,50,50)
# move elbow to end of upper arm
link2 = Translate([link2],0.0,0.0,400)
# rest of upper arm (A2 = 400)
link2 = Collection([
link2,
CylinderZ(400, 40, 0, 50),
CylinderX(-70,85,70,85)])
# move link 3 into place and attach
link2 = Collection([
link2,
Translate([link3], 0,0.0,400)])
# rotate into zero J2 position
link2 = Rotate([link2],90,1,0,0)
# make joint 2 rotate
link2 = HalRotate([link2],c,"joint2",1,1,0,0)
# shoulder stuff
link1 = Collection([
CylinderX(-70,70,70,70),
Box(-70,-70,0,70,70,-100)])
# move link2 to end and attach
link1 = Collection([
link1,
link2])
# move whole assembly up so joint 1 is at origin
link1 = Translate([link1],0.0, 0.0, 100)
# make joint 1 rotate
link1 = HalRotate([link1],c,"joint1",1,0,0,1)
# stationary base
link0 = Collection([
CylinderZ(750, 75, 800, 75),
CylinderZ(25, 90, 750, 50),
CylinderZ(0, 200, 35, 200)])
# move link1 to top and attach
link0 = Collection([
link0,
Translate([link1],0.0,0.0,800)])
# add a floor
floor = Box(-500,-500,-10,500,500,0.0)
work = Capture()
model = Collection([link0, floor, work])
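# main() enters the vismach GUI loop; per vismach's main(model, tooltip, work,
# size) signature, the final argument (1500) is the approximate model extent
# used to scale the initial view.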
main(model, tooltip, work, 1500)
operation = input('operation type: ').lower()
num1 = input("First number: ")
num2 = input("First number: ")
try:
num1, num2 = float(num1), float(num2)
if operation == 'add':
result = num1 + num2
print(result)
elif operation == 'subtract':
result = num1 - num2
print(result)
elif operation == 'multiply':
result = num1 * num2
print(result)
elif operation == 'divide':
result = num1 / num2
print(result)
else:
        print("You didn't choose the right operation")
except (ValueError, ZeroDivisionError):
    print("Improper numbers or operation")
import pytest
import problem22 as p22
def test_burst():
"""Test burst() in Cluster"""
print('\nTesting burst()')
cluster = p22.Cluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1,0)
assert cluster.infected[p22.Position(1,1)] == p22.State.Infected
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up # turned right
assert cluster.virus.pos == p22.Position(0, 0) # moved up
assert cluster.infected[prev_pos] == p22.State.Clean # cleaned
# four times in a row finds clean and infects
for _ in range(4):
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.infected[prev_pos] == p22.State.Infected
assert cluster.virus.pos == p22.Position(0, 0)
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.right
assert cluster.virus.pos == p22.Position(0, 1)
assert cluster.infected[prev_pos] == p22.State.Clean
assert cluster.infections_caused == 5
def test_solve_a():
"""Tests for solve_b()"""
print('\nTesting solve_a()')
assert p22.solve_a(7, '..#\n#..\n...') == 5
assert p22.solve_a(70, '..#\n#..\n...') == 41
assert p22.solve_a(10000, '..#\n#..\n...') == 5587
def test_burst_evolved():
"""Test burst() in EvolvedCluster"""
cluster = p22.EvolvedCluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1,0)
assert cluster.infected[p22.Position(1,1)] == p22.State.Weakened
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0,0)
assert cluster.infected[prev_pos] == p22.State.Flagged
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
@pytest.mark.skip(reason="too slow to test")
def test_solve_b():
"""Tests for solve_b()"""
print('\nTesting solve_b()')
assert p22.solve_b(100, '..#\n#..\n...') == 26
assert p22.solve_b(10000000, '..#\n#..\n...') == 2511944
def test_solve_a0():
"""Tests for solve_a0()"""
print('\nTesting solve_a0()')
assert p22.solve_a0(7, '..#\n#..\n...') == 5
assert p22.solve_a0(70, '..#\n#..\n...') == 41
assert p22.solve_a0(10000, '..#\n#..\n...') == 5587
def test_solve_b0():
"""Tests for solve_b0()"""
print('\nTesting solve_b0()')
assert p22.solve_b0(100, '..#\n#..\n...') == 26
assert p22.solve_b0(10000000, '..#\n#..\n...') == 2511944
# Buildbot master configuration (excerpt).
# ... (imports and configuration tables elided; the code below references os,
# re, defaultdict, Path, defer, Buildbot step/property classes, and constants
# such as LLVM_BRANCHES, HALIDE_BRANCHES, LLVM_FOR_HALIDE, _WORKERS,
# _SANITIZERS, performance_lock, ARTIFACTS_DIR, and VCVARSALL_ENV_VARS) ...
class Purpose(Enum):
halide_nightly = 1
halide_testbranch = 2
llvm_nightly = 3
class BuildSystem(Enum):
make = 0
cmake = 1
class BuilderType:
"""A class to encapsulate the settings for a specific Builder.
(Do not confuse with CMake's 'BUILD_TYPE', which is something else.)
It includes:
- Halide 'target' in the form of arch-bits-os
- LLVM branch to be used
- CMake vs Make
- halide-nightly vs halide-testbranch vs llvm-nightly
- sanitizers vs none
It doesn't currently include any 'features' because we don't currently
bake any in at build time.
It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),
mainly because we currently never test with multiple compilers for a given
setup. (If we ever need to do so, compiler should be added to this.)
"""
def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,
sanitizer=None, buildsystem=BuildSystem.cmake):
assert arch in ['arm', 'x86']
assert bits in [32, 64]
assert os in ['linux', 'windows', 'osx']
assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'
self.arch = arch
self.bits = bits
self.os = os
self.halide_branch = halide_branch
self.llvm_branch = llvm_branch
self.buildsystem = buildsystem
self.purpose = purpose
self.sanitizer = sanitizer
if self.halide_branch:
assert self.purpose != Purpose.llvm_nightly
assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'
assert self.purpose == Purpose.halide_testbranch or self.llvm_branch in LLVM_FOR_HALIDE[
self.halide_branch]
else:
assert self.purpose == Purpose.llvm_nightly
if self.sanitizer:
assert self.sanitizer in _SANITIZERS
def handles_python(self):
if self.bits == 32:
return False
if self.arch == 'arm' and self.os == 'linux':
return False
if self.sanitizer_preset() is not None:
return False
return True
def handles_sanitizers(self):
if self.buildsystem != BuildSystem.cmake:
return False
return (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux' and self.llvm_branch == LLVM_MAIN)
def sanitizer_preset(self):
if self.handles_sanitizers():
if self.sanitizer == 'asan':
return 'linux-x64-asan'
if self.sanitizer == 'fuzzer':
return 'linux-x64-fuzzer'
return None
def handles_riscv(self):
return self.llvm_branch not in [LLVM_RELEASE_15]
def handles_hexagon(self):
return (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux' and self.llvm_branch == LLVM_MAIN)
def handles_wasm(self):
is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux')
return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os ==
'osx')
def handles_wasm_wabt(self):
return self.handles_wasm()
def handles_wasm_v8(self):
return self.handles_wasm() and self.os == 'linux'
def has_nvidia(self):
        return self.arch == 'x86' and self.bits == 64 and self.os in ['windows', 'linux']
def handles_vulkan(self):
return False
def handles_webgpu(self):
return self.os == 'osx' and self.halide_branch not in [
HALIDE_RELEASE_15]
def has_tflite(self):
if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':
return True
if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':
return True
return False
def has_ccache(self):
return self.os in ['osx', 'linux']
def halide_target(self):
return '%s-%d-%s' % (self.arch, self.bits, self.os)
    def llvm_builder_label(self):
        return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.major,
                               self.halide_target())
def halide_builder_label(self):
a = ['halide']
if self.sanitizer:
a.append(self.sanitizer)
if self.purpose == Purpose.halide_testbranch:
a.append('testbranch')
elif self.purpose == Purpose.halide_nightly:
a.append('nightly')
a.append(self.halide_branch)
if self.halide_branch == HALIDE_MAIN:
a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')
a.append(self.halide_target())
a.append(self.buildsystem.name)
return '-'.join(a)
def builder_label(self):
if self.purpose == Purpose.llvm_nightly:
return self.llvm_builder_label()
else:
return self.halide_builder_label()
def builder_tags(self):
return self.builder_label().split('-')
    def get_worker_names(self):
        return [n for n, cfg in _WORKERS
                if self.arch == cfg.arch and self.bits in cfg.bits and self.os == cfg.os]
def __str__(self):
return self.halide_target()
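
# A minimal usage sketch (hypothetical values; real instances are constructed
# by the builder-factory functions further below):
#
#   bt = BuilderType('x86', 64, 'linux', HALIDE_MAIN, LLVM_MAIN,
#                    Purpose.halide_testbranch)
#   bt.halide_target()       # 'x86-64-linux'
#   bt.handles_sanitizers()  # True: x86-64-linux building against LLVM main
#   bt.handles_python()      # True: 64-bit, not arm-linux, no sanitizer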
def get_builddir_subpath(subpath):
return Transform(lambda x: x.replace('\\', '/'), Interpolate(
f'%(prop:builddir)s/{subpath}'))
def get_llvm_toolchains_path(*subpaths):
    return get_builddir_subpath(os.path.join('llvm-toolchains', *subpaths))
def get_llvm_source_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-project', *subpaths))
def get_llvm_build_path(*subpaths):
    return get_builddir_subpath(os.path.join('llvm-build', *subpaths))


def get_llvm_install_path(builder_type, *subpaths):
    llvm_workdir = builder_type.llvm_builder_label()
    return get_builddir_subpath(os.path.join('..', llvm_workdir, 'llvm-install', *subpaths))
def get_halide_source_path(*subpaths):
return get_builddir_subpath(os.path.join('halide-source', *subpaths))
# NOTE: the original listing elides this helper. By analogy with
# get_llvm_build_path() above, it presumably resolves the Halide build
# directory; the 'halide-build' subpath here is an assumption.
def get_halide_build_path(*subpaths):
    return get_builddir_subpath(os.path.join('halide-build', *subpaths))
def get_halide_install_path(builder_type, *subpaths):
s = 'halide-install'
if builder_type.sanitizer:
s += '-' + builder_type.sanitizer
return get_builddir_subpath(os.path.join(s, *subpaths))
# ... (additional helpers elided; VCVARSALL_ENV_VARS used below is defined in
# the elided portion) ...


def get_msvc_config_steps(factory, builder_type):
    # Run vcvarsall.bat and capture the MSVC environment variables we care
    # about, stashing them into the 'env' property for later steps.
    arch_for_bits = {32: 'x64_x86', 64: 'x64'}
    vcvarsall = 'vcvarsall.bat %s && set' % arch_for_bits[builder_type.bits]
    vcvarsdir = 'C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Auxiliary/Build'

    def save_interesting_env_vars(rc, stdout, stderr):
        d = {}
        for line in stdout.split('\n'):
            match = re.match('^([a-zA-Z0-9_-]+)=(.*)$', line.strip())
            if match:
                key = match.group(1).upper()
                value = match.group(2)
                if key in VCVARSALL_ENV_VARS:
                    d[key] = value
        return {'env': d}

    factory.addStep(SetPropertyFromCommand(name='Run VcVarsAll',
        description='Run VcVarsAll', workdir=vcvarsdir,
        locks=[performance_lock.access('counting')], haltOnFailure=True,
        command=vcvarsall, extract_fn=save_interesting_env_vars))
def merge_renderable(_base, _extn):
@renderer
@defer.inlineCallbacks
def render(props):
base = yield props.render(_base)
extn = yield props.render(_extn)
base.update(extn)
return base
return render
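
# Sketch of intended use (assumed, not from the original source): overlay one
# renderable mapping onto another at render time, e.g.
#   env = merge_renderable(Property('env'), {'CCACHE_DIR': '/tmp/ccache'})
# The extend_property() helper used later in this file presumably builds on it.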
def get_distrib_name(_version, target, ext):
    @renderer
    @defer.inlineCallbacks
    def render(props):
        rev = props.getProperty('got_revision')['halide']
        version = yield props.render(_version)
        return os.path.join(ARTIFACTS_DIR,
                            f'Halide-{version}-{target}-{rev}.{ext}')
    return render
def get_cmake_generator(builder_type):
return 'Ninja'
# ... (additional helpers elided) ...


def get_ctest_options(builder_type, build_dir):
    assert builder_type.purpose != Purpose.llvm_nightly
    if builder_type.sanitizer:
        assert builder_type.handles_sanitizers()
        return {'build_config': builder_type.sanitizer_preset()}
    else:
        return {'build_config': 'Release'}
def get_halide_cmake_definitions(builder_type, halide_target='host',
wasm_jit='wabt'):
    cmake_definitions = {
        'Clang_DIR': get_llvm_install_path(builder_type, 'lib/cmake/clang'),
        'CMAKE_INSTALL_PREFIX': get_halide_install_path(builder_type),
        'Halide_TARGET': halide_target,
        'LLD_DIR': get_llvm_install_path(builder_type, 'lib/cmake/lld'),
        'LLVM_DIR': get_llvm_install_path(builder_type, 'lib/cmake/llvm'),
        'LLVM_ROOT': get_llvm_install_path(builder_type),
        'WITH_PYTHON_BINDINGS': 'ON' if builder_type.handles_python() else 'OFF',
        'WITH_TEST_FUZZ': 'ON' if builder_type.sanitizer == 'fuzzer' else 'OFF',
    }
    # Sanitizer builds rely on their CMake preset rather than an explicit
    # CMAKE_BUILD_TYPE; everything else builds Release.
    if not (builder_type.sanitizer and builder_type.handles_sanitizers()):
        cmake_definitions['CMAKE_BUILD_TYPE'] = 'Release'
if builder_type.has_ccache() and not builder_type.sanitizer_preset():
cmake_definitions['Halide_CCACHE_BUILD'] = 'ON'
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path(
'cmake', 'toolchain.linux-arm32.cmake')
if builder_type.os == 'windows':
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
'%(prop:VCPKG_ROOT)s/scripts/buildsystems/vcpkg.cmake')
cmake_definitions['pybind11_DIR'] = Interpolate(
'%(prop:VIRTUAL_ENV)s/share/cmake/pybind11')
if 'wasm' in halide_target:
cmake_definitions['WITH_PYTHON_BINDINGS'] = 'OFF'
if builder_type.handles_wasm() and halide_target.startswith('wasm-'):
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
'%(prop:EMSDK)s/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake'
)
cmake_definitions['NODE_JS_EXECUTABLE'] = Property(
'HALIDE_NODE_JS_PATH')
if wasm_jit == 'v8':
cmake_definitions['WITH_WABT'] = 'OFF'
cmake_definitions['WITH_V8'] = 'ON'
            cmake_definitions['V8_INCLUDE_PATH'] = '/home/halidenightly/v8/v8/include'
            cmake_definitions['V8_LIB_PATH'] = (
                '/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a')
elif wasm_jit == 'wabt':
cmake_definitions['WITH_WABT'] = 'ON'
cmake_definitions['WITH_V8'] = 'OFF'
cmake_definitions['V8_INCLUDE_PATH'] = ''
cmake_definitions['V8_LIB_PATH'] = ''
else:
assert False, 'Unknown wasm jit ' + str(wasm_jit)
if builder_type.handles_webgpu() and 'webgpu' in halide_target:
cmake_definitions['WEBGPU_NODE_BINDINGS'] = Property(
'HL_WEBGPU_NODE_BINDINGS')
cmake_definitions['WEBGPU_NATIVE_LIB'] = Property(
'HL_WEBGPU_NATIVE_LIB')
if builder_type.handles_hexagon() and 'hvx' in halide_target:
cmake_definitions['Halide_BUILD_HEXAGON_REMOTE_RUNTIME'] = 'ON'
return cmake_definitions
# ... (additional helpers elided, including get_halide_cmake_options and
# get_cmake_build_command, which are used below) ...
def get_llvm_cmake_definitions(builder_type):
    definitions = {
        'CMAKE_BUILD_TYPE': 'Release',
        'CMAKE_INSTALL_PREFIX': get_llvm_install_path(builder_type),
        'LLVM_BUILD_32_BITS': 'ON' if builder_type.bits == 32 else 'OFF',
        'LLVM_ENABLE_ASSERTIONS': 'ON',
        'LLVM_ENABLE_BINDINGS': 'OFF',
        'LLVM_ENABLE_CURL': 'OFF',
        'LLVM_ENABLE_DIA_SDK': 'OFF',
        'LLVM_ENABLE_HTTPLIB': 'OFF',
        'LLVM_ENABLE_IDE': 'OFF',
        'LLVM_ENABLE_LIBXML2': 'OFF',
        'LLVM_ENABLE_OCAMLDOC': 'OFF',
        'LLVM_ENABLE_RTTI': 'ON',
        'LLVM_ENABLE_TERMINFO': 'OFF',
        'LLVM_ENABLE_WARNINGS': 'OFF',
        'LLVM_ENABLE_ZLIB': 'ON',
        'LLVM_ENABLE_ZSTD': 'OFF',
        'LLVM_INCLUDE_BENCHMARKS': 'OFF',
        'LLVM_INCLUDE_EXAMPLES': 'OFF',
        'LLVM_INCLUDE_TESTS': 'OFF',
        'LLVM_TARGETS_TO_BUILD': 'X86;ARM;NVPTX;AArch64;Hexagon;PowerPC;WebAssembly',
    }
if builder_type.bits == 32:
definitions['CMAKE_FIND_ROOT_PATH_MODE_INCLUDE'] = 'ONLY'
definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'
definitions['CMAKE_FIND_ROOT_PATH_MODE_PACKAGE'] = 'ONLY'
definitions['CMAKE_FIND_ROOT_PATH_MODE_PROGRAM'] = 'NEVER'
if builder_type.handles_riscv():
definitions['LLVM_TARGETS_TO_BUILD'] += ';RISCV'
if builder_type.handles_sanitizers():
        definitions['LLVM_ENABLE_RUNTIMES'] = 'compiler-rt;libcxx;libcxxabi;libunwind'
definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld;clang-tools-extra'
else:
definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld'
if builder_type.os != 'windows':
definitions['CMAKE_CXX_FLAGS'] = '-Wno-psabi'
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
definitions['CMAKE_TOOLCHAIN_FILE'] = get_llvm_toolchains_path(
'toolchain.linux-arm32.cmake')
definitions['LLVM_TARGET_ARCH'] = 'ARM'
definitions['LLVM_DEFAULT_TARGET_TRIPLE'] = 'arm-linux-gnueabihf'
if (builder_type.arch == 'x86' and builder_type.bits == 32 and
builder_type.os == 'linux'):
definitions['CMAKE_FIND_ROOT_PATH'] = '/usr/lib/i386-linux-gnu'
definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'
if builder_type.os == 'osx':
definitions['LLVM_ENABLE_SUPPORT_XCODE_SIGNPOSTS'] = 'FORCE_OFF'
if builder_type.has_ccache():
definitions['LLVM_CCACHE_BUILD'] = 'ON'
return definitions
# ... (additional build-step helpers elided) ...
def add_halide_cmake_build_steps(factory, builder_type):
source_dir = get_halide_source_path()
build_dir = get_halide_build_path()
install_dir = get_halide_install_path(builder_type)
factory.addStep(RemoveDirectory(name='Remove Halide Build Dir', locks=[
performance_lock.access('counting')], dir=build_dir, haltOnFailure=
False))
factory.addStep(MakeDirectory(name='Make Halide Build Dir', locks=[
performance_lock.access('counting')], dir=build_dir, haltOnFailure=
False))
factory.addStep(RemoveDirectory(name='Remove Halide Install Dir', locks
=[performance_lock.access('counting')], dir=install_dir,
haltOnFailure=False))
factory.addStep(MakeDirectory(name='Make Halide Install Dir', locks=[
performance_lock.access('counting')], dir=install_dir,
haltOnFailure=False))
factory.addStep(CMake(name='Configure Halide', description=
'Configure Halide', locks=[performance_lock.access('counting')],
haltOnFailure=True, workdir=build_dir, env=Property('env'), path=
source_dir, generator=get_cmake_generator(builder_type),
definitions=get_halide_cmake_definitions(builder_type), options=
get_halide_cmake_options(builder_type, build_dir)))
factory.addStep(ShellCommand(name='Build Halide', description=
'Build Halide', locks=[performance_lock.access('counting')],
haltOnFailure=True, workdir=build_dir, env=Property('env'), command
=get_cmake_build_command(builder_type, build_dir, targets=['all',
'install'])))
def add_halide_cmake_package_steps(factory, builder_type):
source_dir = get_halide_source_path()
target = builder_type.halide_target()
ext = 'zip' if builder_type.os == 'windows' else 'tar.gz'
factory.addStep(SetPropertiesFromCMakeCache(name=
'Get Halide package version', workdir=get_halide_build_path(),
props=['CMAKE_PROJECT_VERSION']))
extra_env = dict(Clang_DIR=get_llvm_install_path(builder_type,
'lib/cmake/clang'), LLD_DIR=get_llvm_install_path(builder_type,
'lib/cmake/lld'), LLVM_DIR=get_llvm_install_path(builder_type,
'lib/cmake/llvm'), Halide_VERSION=Property('CMAKE_PROJECT_VERSION'))
if builder_type.os == 'windows':
build_dir = get_halide_build_path('packaging_dir')
if builder_type.arch == 'arm':
arch = 'ARM' if builder_type.bits == 32 else 'ARM64'
else:
arch = 'Win32' if builder_type.bits == 32 else 'x64'
cmd = [get_halide_source_path('packaging/zip/package.bat'),
source_dir, build_dir, arch]
else:
build_dir = get_halide_build_path()
cmd = [get_halide_source_path('packaging/tgz/package.sh'),
source_dir, build_dir]
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
extra_env['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake',
'toolchain.linux-arm32.cmake')
factory.addStep(ShellCommand(name='Package Halide', description=
'Package Halide', workdir=build_dir, env=extend_property('env', **
extra_env), locks=[performance_lock.access('counting')],
haltOnFailure=True, command=cmd))
    factory.addStep(FileUpload(name='Upload Halide package',
        workersrc=Interpolate(f'Halide-%(prop:CMAKE_PROJECT_VERSION)s-{target}.{ext}'),
        locks=[performance_lock.access('counting')], haltOnFailure=True,
        workdir=build_dir, mode=0o644,
        masterdest=get_distrib_name(Property('CMAKE_PROJECT_VERSION'), target, ext)))
def pkg_version_and_target(path: Path):
match = re.match('^(.*)-[a-f0-9]+\\.(tar\\.gz|tgz|zip)', path.name)
return match.group(1) if match else None
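    # e.g. (illustrative) 'Halide-16.0.0-x86-64-linux-0123abcd.tar.gz' groups
    # as 'Halide-16.0.0-x86-64-linux', so stale uploads are grouped per
    # version/target regardless of the git-revision suffix from get_distrib_name().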
factory.addStep(CleanOldFiles(name='Clean old releases', workdir=
ARTIFACTS_DIR, locks=[performance_lock.access('counting')], groupfn
=pkg_version_and_target))
# ... (additional helpers elided, including get_gpu_dsp_targets(), which is
# used below) ...
def get_test_labels(builder_type):
targets = defaultdict(list)
preset = builder_type.sanitizer_preset()
if preset and 'fuzz' in preset:
targets['host'].extend(['fuzz'])
return targets
targets['host'].extend(['internal', 'correctness', 'generator',
'autoschedulers_cpu', 'error', 'warning', 'apps', 'performance',
'tutorial'])
if preset:
return targets
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
targets['host'].remove('internal')
targets['host'].remove('generator')
if builder_type.handles_python():
targets['host'].extend(['python'])
if builder_type.arch == 'x86':
t = 'x86-%d-%s' % (builder_type.bits, builder_type.os)
targets[t].extend(['correctness'])
if builder_type.bits == 64:
targets['%s-sse41' % t].extend(['correctness'])
for t, is_simulator in get_gpu_dsp_targets(builder_type):
if t == 'host-webgpu':
targets[t].extend(['correctness', 'generator'])
else:
targets[t].extend(['correctness', 'generator', 'apps'])
if 'cuda' in t:
targets[t].extend(['autoschedulers_cuda'])
if 'hvx' not in t:
targets[t].extend(['autoschedulers_gpu'])
if not is_simulator:
targets[t].extend(['performance'])
if builder_type.has_nvidia():
targets['host-cuda-opencl'].extend(['correctness_multi_gpu'])
if builder_type.handles_vulkan():
targets[
'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13'
].extend(['internal', 'correctness', 'generator', 'error',
'warning'])
if builder_type.handles_wasm():
if builder_type.handles_wasm_wabt():
targets[
'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/wabt'
].extend(['internal', 'correctness', 'generator', 'error',
'warning'])
if builder_type.handles_wasm_v8():
targets[
'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/v8'
].extend(['internal', 'correctness', 'generator', 'error',
'warning'])
targets[
'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int-wasm_threads'
].extend(['generator', 'apps'])
if builder_type.handles_webgpu():
targets['wasm-32-wasmrt-webgpu'].extend(['generator'])
return targets
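
# Illustrative result: for an x86-64-linux builder with no sanitizer, 'host'
# maps to the full CPU suite, 'x86-64-linux' and 'x86-64-linux-sse41' map to
# ['correctness'], and GPU/DSP targets come from get_gpu_dsp_targets(), plus
# further entries (multi-GPU, wasm, vulkan) depending on builder capabilities.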
# ... (additional helpers elided) ...
def short_target(halide_target):
s = halide_target.split('-')
if len(s) == 1:
return s[0]
elif len(s) == 2:
return '%s-%s' % (s[0], s[1])
elif len(s) == 3:
return '%s-%s-%s' % (s[0], s[1], s[2])
elif len(s) > 3:
return '%s-%s-%s…' % (s[0], s[1], s[2])
else:
return '<unknown>'
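
# e.g. short_target('host') == 'host'; short_target('x86-64-linux') ==
# 'x86-64-linux'; short_target('x86-64-linux-sse41') == 'x86-64-linux…'.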
# ... (elided: add_env_setup_step, add_get_halide_source_steps, and
# add_halide_cmake_test_steps, used below, are defined in the elided portion) ...
def create_halide_cmake_factory(builder_type):
factory = BuildFactory()
add_env_setup_step(factory, builder_type)
add_get_halide_source_steps(factory, builder_type)
add_halide_cmake_build_steps(factory, builder_type)
add_halide_cmake_test_steps(factory, builder_type)
if builder_type.purpose == Purpose.halide_nightly:
add_halide_cmake_package_steps(factory, builder_type)
return factory
# ... (elided: get_interesting_halide_targets and create_halide_builder, used
# below, are defined in the elided portion) ...
def create_halide_builders():
for arch, bits, os in get_interesting_halide_targets():
for halide_branch in HALIDE_NIGHTLIES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os,
halide_branch, llvm_branch, Purpose.halide_nightly)
for halide_branch in _HALIDE_RELEASES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os,
halide_branch, llvm_branch, Purpose.halide_testbranch)
yield from create_halide_builder(arch, bits, os, HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch)
yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 32, 'linux', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 64, 'osx', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('arm', 64, 'osx', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
for llvm_branch in LLVM_BRANCHES:
if abs(LLVM_BRANCHES[llvm_branch].version.major - LLVM_BRANCHES[
LLVM_MAIN].version.major) in [1, 2]:
yield from create_halide_builder('x86', 64, 'linux',
HALIDE_MAIN, llvm_branch, Purpose.halide_testbranch)
# ... (additional builder definitions elided) ...
class SafeGitHubEventHandler(GitHubEventHandler):
def handle_push(self, payload, event):
ref = payload['ref']
if re.match('^refs/(heads|tags)/(master|main|release/\\d+\\.x)$', ref):
return super().handle_push(payload, event)
else:
print(f'SafeGitHubEventHandler: ignoring push event for ref: {ref}'
)
return self.skip()
def handle_pull_request(self, payload, event):
pr = payload['pull_request']
try:
if any(label['name'] == 'skip_buildbots' for label in pr['labels']
):
return self.skip()
if any(r['login'] == 'halidebuildbots' for r in pr[
'requested_reviewers']):
if payload['action'] == 'review_requested':
payload['action'] = 'synchronize'
return super().handle_pull_request(payload, event)
trusted_repos = 'halide/Halide', 'CodeLinaro/Halide'
if pr['head']['repo']['full_name'] not in trusted_repos:
return self.skip()
return super().handle_pull_request(payload, event)
except KeyError as e:
print(f'SafeGitHubEventHandler: malformed payload: {payload}')
print(f'SafeGitHubEventHandler: missing key "{e}"')
return self.skip()
@staticmethod
def skip():
return [], 'git'
# ... (remainder of the buildmaster configuration elided) ...
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Purpose(Enum):
halide_nightly = 1
halide_testbranch = 2
llvm_nightly = 3
class BuildSystem(Enum):
make = 0
cmake = 1
class BuilderType:
"""A class to encapsulate the settings for a specific Builder.
(Do not confuse with CMake's 'BUILD_TYPE', which is something else.)
It includes:
- Halide 'target' in the form of arch-bits-os
- LLVM branch to be used
- CMake vs Make
- halide-nightly vs halide-testbranch vs llvm-nightly
- sanitizers vs none
It doesn't currently include any 'features' because we don't currently
bake any in at build time.
It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),
mainly because we currently never test with multiple compilers for a given
setup. (If we ever need to do so, compiler should be added to this.)
"""
def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,
sanitizer=None, buildsystem=BuildSystem.cmake):
assert arch in ['arm', 'x86']
assert bits in [32, 64]
assert os in ['linux', 'windows', 'osx']
assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'
self.arch = arch
self.bits = bits
self.os = os
self.halide_branch = halide_branch
self.llvm_branch = llvm_branch
self.buildsystem = buildsystem
self.purpose = purpose
self.sanitizer = sanitizer
if self.halide_branch:
assert self.purpose != Purpose.llvm_nightly
assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'
assert self.purpose == Purpose.halide_testbranch or self.llvm_branch in LLVM_FOR_HALIDE[
self.halide_branch]
else:
assert self.purpose == Purpose.llvm_nightly
if self.sanitizer:
assert self.sanitizer in _SANITIZERS
def handles_python(self):
if self.bits == 32:
return False
if self.arch == 'arm' and self.os == 'linux':
return False
if self.sanitizer_preset() is not None:
return False
return True
def handles_sanitizers(self):
if self.buildsystem != BuildSystem.cmake:
return False
return (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux' and self.llvm_branch == LLVM_MAIN)
def sanitizer_preset(self):
if self.handles_sanitizers():
if self.sanitizer == 'asan':
return 'linux-x64-asan'
if self.sanitizer == 'fuzzer':
return 'linux-x64-fuzzer'
return None
def handles_riscv(self):
return self.llvm_branch not in [LLVM_RELEASE_15]
def handles_hexagon(self):
return (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux' and self.llvm_branch == LLVM_MAIN)
def handles_wasm(self):
is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux')
return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os ==
'osx')
def handles_wasm_wabt(self):
return self.handles_wasm()
def handles_wasm_v8(self):
return self.handles_wasm() and self.os == 'linux'
def has_nvidia(self):
return self.arch == 'x86' and self.bits == 64 and self.os in ['windows'
, 'linux']
def handles_vulkan(self):
return False
def handles_webgpu(self):
return self.os == 'osx' and self.halide_branch not in [
HALIDE_RELEASE_15]
def has_tflite(self):
if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':
return True
if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':
return True
return False
def has_ccache(self):
return self.os in ['osx', 'linux']
def halide_target(self):
return '%s-%d-%s' % (self.arch, self.bits, self.os)
def llvm_builder_label(self):
return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.
major, self.halide_target())
def halide_builder_label(self):
a = ['halide']
if self.sanitizer:
a.append(self.sanitizer)
if self.purpose == Purpose.halide_testbranch:
a.append('testbranch')
elif self.purpose == Purpose.halide_nightly:
a.append('nightly')
a.append(self.halide_branch)
if self.halide_branch == HALIDE_MAIN:
a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')
a.append(self.halide_target())
a.append(self.buildsystem.name)
return '-'.join(a)
def builder_label(self):
if self.purpose == Purpose.llvm_nightly:
return self.llvm_builder_label()
else:
return self.halide_builder_label()
def builder_tags(self):
return self.builder_label().split('-')
def get_worker_names(self):
return [n for n, cfg in _WORKERS if self.arch == cfg.arch and self.
bits in cfg.bits and self.os == cfg.os]
def __str__(self):
return self.halide_target()
def get_builddir_subpath(subpath):
return Transform(lambda x: x.replace('\\', '/'), Interpolate(
f'%(prop:builddir)s/{subpath}'))
def get_llvm_toolchains_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-toolchains', *subpaths))
def get_llvm_source_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-project', *subpaths))
def get_llvm_build_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-build', *subpaths))
def get_llvm_install_path(builder_type, *subpaths):
llvm_workdir = builder_type.llvm_builder_label()
return get_builddir_subpath(os.path.join('..', llvm_workdir,
'llvm-install', *subpaths))
def get_halide_source_path(*subpaths):
return get_builddir_subpath(os.path.join('halide-source', *subpaths))
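# NOTE: get_halide_build_path is used throughout this file but its definition
# is not present in this excerpt. The sketch below is a plausible
# reconstruction by analogy with get_llvm_build_path above, not the verbatim
# original.
def get_halide_build_path(*subpaths):
    return get_builddir_subpath(os.path.join('halide-build', *subpaths))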
def get_halide_install_path(builder_type, *subpaths):
s = 'halide-install'
if builder_type.sanitizer:
s += '-' + builder_type.sanitizer
return get_builddir_subpath(os.path.join(s, *subpaths))
def add_get_halide_source_steps(factory, builder_type):
    factory.addStep(GitHub(name='Get Halide source',
                           locks=[performance_lock.access('counting')],
                           codebase='halide', workdir=get_halide_source_path(),
                           repourl='https://github.com/halide/Halide.git',
                           branch=HALIDE_BRANCHES[builder_type.halide_branch].ref,
                           mode='incremental'))


def add_get_llvm_source_steps(factory, builder_type):
    factory.addStep(Git(name=f'Get LLVM {LLVM_BRANCHES[builder_type.llvm_branch].version.major}',
                        locks=[performance_lock.access('counting')],
                        codebase='llvm', workdir=get_llvm_source_path(),
                        repourl='https://github.com/llvm/llvm-project.git',
                        branch=LLVM_BRANCHES[builder_type.llvm_branch].ref,
                        mode='incremental'))
    toolchains_dir = get_llvm_toolchains_path()
    factory.addStep(MakeDirectory(name='Make CMake toolchain directory',
                                  locks=[performance_lock.access('counting')],
                                  dir=toolchains_dir, haltOnFailure=False))
    factory.addStep(FileDownload(name='Download CMake toolchains',
                                 mastersrc='toolchain.linux-arm32.cmake',
                                 workerdest='toolchain.linux-arm32.cmake',
                                 workdir=toolchains_dir,
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True, mode=0o644))
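# NOTE: VCVARSALL_ENV_VARS (consulted by save_interesting_env_vars below) is
# not defined in this excerpt; the list here is an assumed minimal set of the
# MSVC environment variables worth capturing, not the verbatim original.
VCVARSALL_ENV_VARS = ['INCLUDE', 'LIB', 'LIBPATH', 'PATH',
                      'VCINSTALLDIR', 'VCTOOLSINSTALLDIR',
                      'VSINSTALLDIR', 'WINDOWSSDKDIR']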
def get_msvc_config_steps(factory, builder_type):
    arch_for_bits = {32: 'x64_x86', 64: 'x64'}
vcvarsall = 'vcvarsall.bat %s && set' % arch_for_bits[builder_type.bits]
vcvarsdir = (
'C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Auxiliary/Build'
)
def save_interesting_env_vars(rc, stdout, stderr):
d = {}
for line in stdout.split('\n'):
match = re.match('^([a-zA-Z0-9_-]+)=(.*)$', line.strip())
if match:
key = match.group(1).upper()
value = match.group(2)
if key in VCVARSALL_ENV_VARS:
d[key] = value
return {'env': d}
    factory.addStep(SetPropertyFromCommand(name='Run VcVarsAll',
                                           description='Run VcVarsAll',
                                           workdir=vcvarsdir,
                                           locks=[performance_lock.access('counting')],
                                           haltOnFailure=True, command=vcvarsall,
                                           extract_fn=save_interesting_env_vars))
def merge_renderable(_base, _extn):
@renderer
@defer.inlineCallbacks
def render(props):
base = yield props.render(_base)
extn = yield props.render(_extn)
base.update(extn)
return base
return render
def get_distrib_name(_version, target, ext):
@renderer
@defer.inlineCallbacks
def render(props):
rev = props.getProperty('got_revision')['halide']
version = yield props.render(_version)
return os.path.join(ARTIFACTS_DIR,
f'Halide-{version}-{target}-{rev}.{ext}')
return render
def get_cmake_generator(builder_type):
return 'Ninja'
def get_halide_cmake_options(builder_type, build_dir):
    options = []
    if builder_type.sanitizer and builder_type.purpose != Purpose.llvm_nightly:
        assert builder_type.handles_sanitizers()
        options.append('--preset=%s' % builder_type.sanitizer_preset())
    options += ['-B', build_dir]
    return options
def get_ctest_options(builder_type, build_dir):
assert builder_type.purpose != Purpose.llvm_nightly
if builder_type.sanitizer:
assert builder_type.handles_sanitizers()
return {'build_config': builder_type.sanitizer_preset()}
else:
return {'build_config': 'Release'}
def get_halide_cmake_definitions(builder_type, halide_target='host',
wasm_jit='wabt'):
    cmake_definitions = {
        'Clang_DIR': get_llvm_install_path(builder_type, 'lib/cmake/clang'),
        'CMAKE_INSTALL_PREFIX': get_halide_install_path(builder_type),
        'Halide_TARGET': halide_target,
        'LLD_DIR': get_llvm_install_path(builder_type, 'lib/cmake/lld'),
        'LLVM_DIR': get_llvm_install_path(builder_type, 'lib/cmake/llvm'),
        'LLVM_ROOT': get_llvm_install_path(builder_type),
        'WITH_PYTHON_BINDINGS': 'ON' if builder_type.handles_python() else 'OFF',
        'WITH_TEST_FUZZ': 'ON' if builder_type.sanitizer == 'fuzzer' else 'OFF',
    }
    # Sanitizer builds take their build type from the CMake preset; everything
    # else is a plain Release build.
    if not (builder_type.sanitizer and builder_type.handles_sanitizers()):
        cmake_definitions['CMAKE_BUILD_TYPE'] = 'Release'
    if builder_type.has_ccache() and not builder_type.sanitizer_preset():
        cmake_definitions['Halide_CCACHE_BUILD'] = 'ON'
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path(
'cmake', 'toolchain.linux-arm32.cmake')
if builder_type.os == 'windows':
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
'%(prop:VCPKG_ROOT)s/scripts/buildsystems/vcpkg.cmake')
cmake_definitions['pybind11_DIR'] = Interpolate(
'%(prop:VIRTUAL_ENV)s/share/cmake/pybind11')
if 'wasm' in halide_target:
cmake_definitions['WITH_PYTHON_BINDINGS'] = 'OFF'
if builder_type.handles_wasm() and halide_target.startswith('wasm-'):
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
'%(prop:EMSDK)s/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake'
)
cmake_definitions['NODE_JS_EXECUTABLE'] = Property(
'HALIDE_NODE_JS_PATH')
        if wasm_jit == 'v8':
            cmake_definitions['WITH_WABT'] = 'OFF'
            cmake_definitions['WITH_V8'] = 'ON'
            cmake_definitions['V8_INCLUDE_PATH'] = '/home/halidenightly/v8/v8/include'
            cmake_definitions['V8_LIB_PATH'] = (
                '/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a')
        elif wasm_jit == 'wabt':
            cmake_definitions['WITH_WABT'] = 'ON'
            cmake_definitions['WITH_V8'] = 'OFF'
            cmake_definitions['V8_INCLUDE_PATH'] = ''
            cmake_definitions['V8_LIB_PATH'] = ''
        else:
            assert False, 'Unknown wasm jit ' + str(wasm_jit)
if builder_type.handles_webgpu() and 'webgpu' in halide_target:
cmake_definitions['WEBGPU_NODE_BINDINGS'] = Property(
'HL_WEBGPU_NODE_BINDINGS')
cmake_definitions['WEBGPU_NATIVE_LIB'] = Property(
'HL_WEBGPU_NATIVE_LIB')
if builder_type.handles_hexagon() and 'hvx' in halide_target:
cmake_definitions['Halide_BUILD_HEXAGON_REMOTE_RUNTIME'] = 'ON'
return cmake_definitions
def get_cmake_build_command(builder_type, build_dir, targets=None):
    cmd = ['ninja', '-C', build_dir, '-j', Property('WORKER_BUILD_PARALLELISM')]
    if builder_type.os == 'windows':
        cmd.append('-v')
    if targets:
        cmd.extend(targets)
    return cmd
def extend_property(dict_name, **kwargs):
@renderer
def render(props):
table = props.getProperty(dict_name, default={})
table.update(kwargs)
return table
return render
def add_env_setup_step(factory, builder_type, enable_ccache=False):
if builder_type.os == 'windows':
get_msvc_config_steps(factory, builder_type)
cxx = 'c++'
cc = 'cc'
ld = 'ld'
if builder_type.os == 'linux':
cc = 'gcc-9'
cxx = 'g++-9'
ld = 'ld'
if builder_type.arch == 'x86' and builder_type.bits == 32:
cxx += ' -m32'
cc += ' -m32'
ld += ' -melf_i386'
elif builder_type.os == 'windows':
cxx = 'cl.exe'
cc = 'cl.exe'
if enable_ccache and builder_type.has_ccache():
cxx = 'ccache ' + cxx
cc = 'ccache ' + cc
env = {'CC': cc, 'CXX': cxx, 'LD': ld}
factory.addStep(SetPropertiesFromEnv(name='Read worker environment',
variables=['EMSDK', 'HALIDE_NODE_JS_PATH', 'HL_HEXAGON_TOOLS',
'HL_WEBGPU_NATIVE_LIB', 'HL_WEBGPU_NODE_BINDINGS',
'LD_LIBRARY_PATH', 'VIRTUAL_ENV', 'VCPKG_ROOT']))
vcpkg_root = Property('VCPKG_ROOT', default=None)
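    # Hexagon tests run on the simulator rather than real DSP hardware; point
    # the runtime at the hexagon_remote build and the simulator's ISS libraries.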
if builder_type.handles_hexagon():
hexagon_remote_bin = get_halide_build_path('src', 'runtime',
'hexagon_remote')
env['HL_HEXAGON_SIM_REMOTE'] = Transform(os.path.join,
hexagon_remote_bin, 'hexagon', 'bin', 'hexagon_sim_remote')
env['HL_HEXAGON_SIM_CYCLES'] = '1'
env['LD_LIBRARY_PATH'] = [hexagon_remote_bin, Interpolate(
'%(prop:HL_HEXAGON_TOOLS)s/lib/iss')]
env['HEXAGON_SDK_ROOT'] = Interpolate(
'%(prop:HL_HEXAGON_TOOLS)s/../../../..')
if builder_type.has_nvidia() and builder_type.handles_vulkan():
env['VK_INSTANCE_LAYERS'] = 'VK_LAYER_KHRONOS_validation'
if builder_type.os == 'osx':
env['METAL_DEVICE_WRAPPER_TYPE'] = '1'
if builder_type.os == 'windows':
vcpkg_root = Property('VCPKG_ROOT', default='C:/vcpkg')
env['VCPKG_ROOT'] = vcpkg_root
env['CUDA_CACHE_DISABLE'] = '1'
env['HL_DISABLE_WINDOWS_ABORT_DIALOG'] = '1'
    factory.addStep(SetProperties(name='Initialize environment',
                                  properties=dict(env=extend_property('env', **env),
                                                  VCPKG_ROOT=vcpkg_root)))
@renderer
def get_llvm_latest_commit(props):
    build_dir = props.getProperty('builddir')
    assert not isinstance(build_dir, dict)
    build_dir = build_dir.replace('\\', '/')
    return ('cd %s/llvm-project && git log -1 > %s/llvm-install/llvm_latest_commit.txt'
            % (build_dir, build_dir))
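# NOTE: create_llvm_cmake_factory is referenced by create_llvm_builders below
# but is not present in this excerpt. This sketch shows the assumed shape
# (checkout, configure, build + install, stamp the commit); the original's
# exact step list may differ.
def create_llvm_cmake_factory(builder_type):
    factory = BuildFactory()
    add_env_setup_step(factory, builder_type)
    add_get_llvm_source_steps(factory, builder_type)
    build_dir = get_llvm_build_path()
    factory.addStep(MakeDirectory(name='Make LLVM Build Dir',
                                  locks=[performance_lock.access('counting')],
                                  dir=build_dir, haltOnFailure=False))
    factory.addStep(CMake(name='Configure LLVM',
                          locks=[performance_lock.access('counting')],
                          haltOnFailure=True, workdir=build_dir, env=Property('env'),
                          path=get_llvm_source_path(),
                          generator=get_cmake_generator(builder_type),
                          definitions=get_llvm_cmake_definitions(builder_type)))
    factory.addStep(ShellCommand(name='Build LLVM',
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True, workdir=build_dir, env=Property('env'),
                                 command=get_cmake_build_command(builder_type, build_dir,
                                                                 targets=['install'])))
    factory.addStep(ShellCommand(name='Stamp LLVM commit',
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=False, workdir=build_dir, env=Property('env'),
                                 command=get_llvm_latest_commit))
    return factory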
def add_halide_cmake_build_steps(factory, builder_type):
source_dir = get_halide_source_path()
build_dir = get_halide_build_path()
install_dir = get_halide_install_path(builder_type)
    factory.addStep(RemoveDirectory(name='Remove Halide Build Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=build_dir, haltOnFailure=False))
    factory.addStep(MakeDirectory(name='Make Halide Build Dir',
                                  locks=[performance_lock.access('counting')],
                                  dir=build_dir, haltOnFailure=False))
    factory.addStep(RemoveDirectory(name='Remove Halide Install Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=install_dir, haltOnFailure=False))
    factory.addStep(MakeDirectory(name='Make Halide Install Dir',
                                  locks=[performance_lock.access('counting')],
                                  dir=install_dir, haltOnFailure=False))
    factory.addStep(CMake(name='Configure Halide', description='Configure Halide',
                          locks=[performance_lock.access('counting')],
                          haltOnFailure=True, workdir=build_dir, env=Property('env'),
                          path=source_dir,
                          generator=get_cmake_generator(builder_type),
                          definitions=get_halide_cmake_definitions(builder_type),
                          options=get_halide_cmake_options(builder_type, build_dir)))
    factory.addStep(ShellCommand(name='Build Halide', description='Build Halide',
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True, workdir=build_dir, env=Property('env'),
                                 command=get_cmake_build_command(builder_type, build_dir,
                                                                 targets=['all', 'install'])))
def add_halide_cmake_package_steps(factory, builder_type):
source_dir = get_halide_source_path()
target = builder_type.halide_target()
ext = 'zip' if builder_type.os == 'windows' else 'tar.gz'
factory.addStep(SetPropertiesFromCMakeCache(name=
'Get Halide package version', workdir=get_halide_build_path(),
props=['CMAKE_PROJECT_VERSION']))
extra_env = dict(Clang_DIR=get_llvm_install_path(builder_type,
'lib/cmake/clang'), LLD_DIR=get_llvm_install_path(builder_type,
'lib/cmake/lld'), LLVM_DIR=get_llvm_install_path(builder_type,
'lib/cmake/llvm'), Halide_VERSION=Property('CMAKE_PROJECT_VERSION'))
if builder_type.os == 'windows':
build_dir = get_halide_build_path('packaging_dir')
if builder_type.arch == 'arm':
arch = 'ARM' if builder_type.bits == 32 else 'ARM64'
else:
arch = 'Win32' if builder_type.bits == 32 else 'x64'
cmd = [get_halide_source_path('packaging/zip/package.bat'),
source_dir, build_dir, arch]
else:
build_dir = get_halide_build_path()
cmd = [get_halide_source_path('packaging/tgz/package.sh'),
source_dir, build_dir]
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
extra_env['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake',
'toolchain.linux-arm32.cmake')
    factory.addStep(ShellCommand(name='Package Halide', description='Package Halide',
                                 workdir=build_dir,
                                 env=extend_property('env', **extra_env),
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True, command=cmd))
    factory.addStep(FileUpload(name='Upload Halide package',
                               workersrc=Interpolate(f'Halide-%(prop:CMAKE_PROJECT_VERSION)s-{target}.{ext}'),
                               locks=[performance_lock.access('counting')],
                               haltOnFailure=True, workdir=build_dir, mode=0o644,
                               masterdest=get_distrib_name(Property('CMAKE_PROJECT_VERSION'),
                                                           target, ext)))
def pkg_version_and_target(path: Path):
match = re.match('^(.*)-[a-f0-9]+\\.(tar\\.gz|tgz|zip)', path.name)
return match.group(1) if match else None
    factory.addStep(CleanOldFiles(name='Clean old releases', workdir=ARTIFACTS_DIR,
                                  locks=[performance_lock.access('counting')],
                                  groupfn=pkg_version_and_target))
def get_gpu_dsp_targets(builder_type):
    if builder_type.sanitizer_preset() is not None:
        return
    if builder_type.has_nvidia():
        yield 'host-cuda', False
        yield 'host-opencl', False
    if builder_type.handles_vulkan():
        yield 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13', False
    if builder_type.handles_webgpu():
        yield 'host-webgpu', False
    if builder_type.os == 'osx':
        yield 'host-metal', False
    if builder_type.handles_hexagon():
        yield 'host-hvx', True
def get_test_labels(builder_type):
targets = defaultdict(list)
preset = builder_type.sanitizer_preset()
if preset and 'fuzz' in preset:
targets['host'].extend(['fuzz'])
return targets
targets['host'].extend(['internal', 'correctness', 'generator',
'autoschedulers_cpu', 'error', 'warning', 'apps', 'performance',
'tutorial'])
if preset:
return targets
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
targets['host'].remove('internal')
targets['host'].remove('generator')
if builder_type.handles_python():
targets['host'].extend(['python'])
if builder_type.arch == 'x86':
t = 'x86-%d-%s' % (builder_type.bits, builder_type.os)
targets[t].extend(['correctness'])
if builder_type.bits == 64:
targets['%s-sse41' % t].extend(['correctness'])
for t, is_simulator in get_gpu_dsp_targets(builder_type):
if t == 'host-webgpu':
targets[t].extend(['correctness', 'generator'])
else:
targets[t].extend(['correctness', 'generator', 'apps'])
if 'cuda' in t:
targets[t].extend(['autoschedulers_cuda'])
if 'hvx' not in t:
targets[t].extend(['autoschedulers_gpu'])
if not is_simulator:
targets[t].extend(['performance'])
if builder_type.has_nvidia():
targets['host-cuda-opencl'].extend(['correctness_multi_gpu'])
    if builder_type.handles_vulkan():
        vk_target = 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13'
        targets[vk_target].extend(['internal', 'correctness', 'generator',
                                   'error', 'warning'])
    if builder_type.handles_wasm():
        wasm_base = 'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int'
        if builder_type.handles_wasm_wabt():
            targets[wasm_base + '/wabt'].extend(['internal', 'correctness',
                                                 'generator', 'error', 'warning'])
        if builder_type.handles_wasm_v8():
            targets[wasm_base + '/v8'].extend(['internal', 'correctness',
                                               'generator', 'error', 'warning'])
        targets[wasm_base + '-wasm_threads'].extend(['generator', 'apps'])
    if builder_type.handles_webgpu():
        targets['wasm-32-wasmrt-webgpu'].extend(['generator'])
    return targets
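# NOTE: is_time_critical_test is used by create_halide_make_factory below but
# is not defined in this excerpt. Assumed predicate: benchmarks need the
# machine to themselves, so they run single-threaded under an exclusive lock.
# This is a guess at the original, not verbatim.
def is_time_critical_test(label):
    return label in ('performance', 'autoschedulers_cpu',
                     'autoschedulers_gpu', 'autoschedulers_cuda')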
def short_target(halide_target):
s = halide_target.split('-')
if len(s) == 1:
return s[0]
elif len(s) == 2:
return '%s-%s' % (s[0], s[1])
elif len(s) == 3:
return '%s-%s-%s' % (s[0], s[1], s[2])
elif len(s) > 3:
return '%s-%s-%s…' % (s[0], s[1], s[2])
else:
return '<unknown>'
def create_halide_make_factory(builder_type):
    assert builder_type.os != 'windows'
    make_threads = Property('WORKER_BUILD_PARALLELISM')
    build_dir = get_halide_build_path()
    factory = BuildFactory()
    add_env_setup_step(factory, builder_type, enable_ccache=True)
    add_get_halide_source_steps(factory, builder_type)
    factory.addStep(RemoveDirectory(name='Remove Halide Build Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=build_dir))
    target_label_pairs = [('host', 'build_tests')]
    for halide_target, labels_for_target in get_test_labels(builder_type).items():
        if halide_target != 'host':
            continue
        _labels_to_skip = ['autoschedulers_cpu', 'autoschedulers_gpu',
                           'autoschedulers_cuda', 'performance', 'python']
        if builder_type.bits == 32:
            _labels_to_skip.extend(['autoschedulers_cpu', 'autoschedulers_gpu',
                                    'autoschedulers_cuda'])
        for label in labels_for_target:
            if label in _labels_to_skip:
                continue
            target_label_pairs.append((halide_target, label))
    for halide_target, label in target_label_pairs:
        env = extend_property('env',
                              LLVM_CONFIG=get_llvm_install_path(builder_type, 'bin/llvm-config'),
                              HL_TARGET=halide_target,
                              HL_JIT_TARGET=halide_target)
        if is_time_critical_test(label):
            p = 1
            lock_mode = 'exclusive'
        else:
            p = make_threads
            lock_mode = 'counting'
        if label != 'build_tests':
            label = 'test_%s' % label
        factory.addStep(ShellCommand(name='make ' + label,
                                     description=label + ' ' + halide_target,
                                     locks=[performance_lock.access(lock_mode)],
                                     workdir=build_dir, env=env, haltOnFailure=False,
                                     command=['make', '-f', get_halide_source_path('Makefile'),
                                              '-j', p, label],
                                     timeout=3600))
    return factory
def create_halide_cmake_factory(builder_type):
factory = BuildFactory()
add_env_setup_step(factory, builder_type)
add_get_halide_source_steps(factory, builder_type)
add_halide_cmake_build_steps(factory, builder_type)
add_halide_cmake_test_steps(factory, builder_type)
if builder_type.purpose == Purpose.halide_nightly:
add_halide_cmake_package_steps(factory, builder_type)
return factory
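# NOTE: the next two helpers are referenced below (by create_halide_builder
# and create_halide_builders) but are not present in this excerpt. Both are
# plausible reconstructions, not the verbatim originals.
def create_halide_factory(builder_type):
    # Assumed dispatch: pick whichever factory matches the builder's build system.
    if builder_type.buildsystem == BuildSystem.cmake:
        return create_halide_cmake_factory(builder_type)
    return create_halide_make_factory(builder_type)


def get_interesting_halide_targets():
    # Assumed enumeration of the arch-bits-os triples this master cares about;
    # combinations with no matching workers simply produce no builders.
    for arch in ['arm', 'x86']:
        for bits in [32, 64]:
            for os_ in ['linux', 'osx', 'windows']:
                yield arch, bits, os_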
def create_halide_builder(arch, bits, os, halide_branch, llvm_branch, purpose,
                          buildsystem=BuildSystem.cmake):
    sanitizers = [None]
    if purpose != Purpose.halide_nightly:
        sanitizers.extend(_SANITIZERS)
    for san in sanitizers:
        builder_type = BuilderType(arch, bits, os, halide_branch, llvm_branch,
                                   purpose, san, buildsystem)
        if san and purpose == Purpose.llvm_nightly:
            continue
        if san and not builder_type.handles_sanitizers():
            continue
        workers = builder_type.get_worker_names()
        builder = BuilderConfig(name=builder_type.builder_label(),
                                workernames=workers,
                                factory=create_halide_factory(builder_type),
                                collapseRequests=True,
                                locks=[llvm_build_locks[llvm_branch + str(bits)].access('counting')],
                                tags=builder_type.builder_tags())
        builder.builder_type = builder_type
        yield builder
def create_halide_builders():
for arch, bits, os in get_interesting_halide_targets():
for halide_branch in HALIDE_NIGHTLIES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os,
halide_branch, llvm_branch, Purpose.halide_nightly)
for halide_branch in _HALIDE_RELEASES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os,
halide_branch, llvm_branch, Purpose.halide_testbranch)
yield from create_halide_builder(arch, bits, os, HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch)
yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 32, 'linux', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 64, 'osx', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('arm', 64, 'osx', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
    for llvm_branch in LLVM_BRANCHES:
        if abs(LLVM_BRANCHES[llvm_branch].version.major -
               LLVM_BRANCHES[LLVM_MAIN].version.major) in [1, 2]:
            yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,
                                             llvm_branch, Purpose.halide_testbranch)
def create_halide_scheduler(halide_branch):
    def is_halide_base_branch(br):
        return any(br == hl.ref for hl in HALIDE_BRANCHES.values())

    def is_halide_pr_branch(br):
        return not is_halide_base_branch(br)

    def github_base_branch_matches(change):
        ref = change.properties.getProperty('basename')
        return ref == HALIDE_BRANCHES[halide_branch].ref

    builders = [b for b in c['builders']
                if b.builder_type.halide_branch == halide_branch
                and b.builder_type.purpose == Purpose.halide_nightly]
    if builders:
        builder_names = [str(b.name) for b in builders]
        yield Nightly(name='halide-package-' + halide_branch, codebases=['halide'],
                      builderNames=builder_names,
                      change_filter=ChangeFilter(codebase='halide'),
                      hour=4, minute=0)
        yield ForceScheduler(name='force-halide-nightly-' + halide_branch,
                             builderNames=builder_names, codebases=['halide'])

    builders = [b for b in c['builders']
                if b.builder_type.halide_branch == halide_branch
                and b.builder_type.purpose == Purpose.halide_testbranch]
    if builders:
        builder_names = [str(b.name) for b in builders]
        yield AnyBranchScheduler(name='halide-testbranch-' + halide_branch,
                                 codebases=['halide'],
                                 change_filter=ChangeFilter(category='pull', codebase='halide',
                                                            branch_fn=is_halide_pr_branch,
                                                            filter_fn=github_base_branch_matches),
                                 treeStableTimer=60 * 5,
                                 builderNames=builder_names)
        yield ForceScheduler(name='force-halide-testbranch-' + halide_branch,
                             builderNames=builder_names, codebases=['halide'])
def create_llvm_builders():
for arch, bits, os in get_interesting_halide_targets():
for llvm_branch in LLVM_BRANCHES:
            builder_type = BuilderType(arch, bits, os, None, llvm_branch,
                                       Purpose.llvm_nightly)
            for w in builder_type.get_worker_names():
                label = builder_type.llvm_builder_label()
                builder = BuilderConfig(name='%s/%s' % (label, w),
                                        workerbuilddir=label, workernames=[w],
                                        factory=create_llvm_cmake_factory(builder_type),
                                        collapseRequests=True,
                                        locks=[llvm_build_locks[llvm_branch + str(bits)].access('exclusive')],
                                        tags=builder_type.builder_tags())
                builder.builder_type = builder_type
                yield builder
def create_builders():
    yield from create_llvm_builders()
    yield from create_halide_builders()
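# NOTE: create_llvm_scheduler is used by create_schedulers below but is not
# defined in this excerpt. Assumed shape, by analogy with
# create_halide_scheduler above: a nightly trigger plus a force scheduler for
# every LLVM builder on the given branch. Sketch, not the original.
def create_llvm_scheduler(llvm_branch):
    major = LLVM_BRANCHES[llvm_branch].version.major
    builders = [b for b in c['builders']
                if b.builder_type.purpose == Purpose.llvm_nightly
                and b.builder_type.llvm_branch == llvm_branch]
    if builders:
        builder_names = [str(b.name) for b in builders]
        yield Nightly(name=f'llvm-nightly-{major}', codebases=['llvm'],
                      builderNames=builder_names,
                      change_filter=ChangeFilter(codebase='llvm'),
                      hour=0, minute=0)
        yield ForceScheduler(name=f'force-llvm-nightly-{major}',
                             builderNames=builder_names, codebases=['llvm'])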
def create_schedulers():
for llvm_branch in LLVM_BRANCHES:
yield from create_llvm_scheduler(llvm_branch)
for halide_branch in HALIDE_BRANCHES:
yield from create_halide_scheduler(halide_branch)
def prioritize_builders(buildmaster, builders):
    def importance(builder):
        builder_type = builder.config.builder_type
        assert builder_type
        if builder_type.purpose == Purpose.llvm_nightly:
            return 0
        if builder_type.purpose == Purpose.halide_testbranch:
            return 1
        if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_15]:
            return 2
        if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_16]:
            return 2
        if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_MAIN]:
            return 3
        return 4
    return sorted(builders, key=importance)
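# SafeGitHubEventHandler filters incoming GitHub webhooks: only pushes to the
# main and release branches are built, and pull requests are skipped unless
# they come from a trusted repo or a maintainer re-requests review from the
# halidebuildbots account.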
class SafeGitHubEventHandler(GitHubEventHandler):
def handle_push(self, payload, event):
ref = payload['ref']
if re.match('^refs/(heads|tags)/(master|main|release/\\d+\\.x)$', ref):
return super().handle_push(payload, event)
else:
            print(f'SafeGitHubEventHandler: ignoring push event for ref: {ref}')
            return self.skip()
def handle_pull_request(self, payload, event):
pr = payload['pull_request']
try:
            if any(label['name'] == 'skip_buildbots' for label in pr['labels']):
                return self.skip()
            if any(r['login'] == 'halidebuildbots' for r in pr['requested_reviewers']):
                # A review request to halidebuildbots is the signal to re-run
                # the bots; treat it like a new push.
                if payload['action'] == 'review_requested':
                    payload['action'] = 'synchronize'
                return super().handle_pull_request(payload, event)
            trusted_repos = ('halide/Halide', 'CodeLinaro/Halide')
if pr['head']['repo']['full_name'] not in trusted_repos:
return self.skip()
return super().handle_pull_request(payload, event)
except KeyError as e:
print(f'SafeGitHubEventHandler: malformed payload: {payload}')
print(f'SafeGitHubEventHandler: missing key "{e}"')
return self.skip()
@staticmethod
def skip():
return [], 'git'
def codebase_generator(chdict):
repo = chdict['repository']
    assert repo in all_repositories, 'Codebase not found for chdict: ' + str(chdict)
return all_repositories[repo]
<|reserved_special_token_0|>
class Purpose(Enum):
halide_nightly = 1
halide_testbranch = 2
llvm_nightly = 3
class BuildSystem(Enum):
make = 0
cmake = 1
class BuilderType:
"""A class to encapsulate the settings for a specific Builder.
(Do not confuse with CMake's 'BUILD_TYPE', which is something else.)
It includes:
- Halide 'target' in the form of arch-bits-os
- LLVM branch to be used
- CMake vs Make
- halide-nightly vs halide-testbranch vs llvm-nightly
- sanitizers vs none
It doesn't currently include any 'features' because we don't currently
bake any in at build time.
It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),
mainly because we currently never test with multiple compilers for a given
setup. (If we ever need to do so, compiler should be added to this.)
"""
def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,
sanitizer=None, buildsystem=BuildSystem.cmake):
assert arch in ['arm', 'x86']
assert bits in [32, 64]
assert os in ['linux', 'windows', 'osx']
assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'
self.arch = arch
self.bits = bits
self.os = os
self.halide_branch = halide_branch
self.llvm_branch = llvm_branch
self.buildsystem = buildsystem
self.purpose = purpose
self.sanitizer = sanitizer
if self.halide_branch:
assert self.purpose != Purpose.llvm_nightly
assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'
assert self.purpose == Purpose.halide_testbranch or self.llvm_branch in LLVM_FOR_HALIDE[
self.halide_branch]
else:
assert self.purpose == Purpose.llvm_nightly
if self.sanitizer:
assert self.sanitizer in _SANITIZERS
def handles_python(self):
if self.bits == 32:
return False
if self.arch == 'arm' and self.os == 'linux':
return False
if self.sanitizer_preset() is not None:
return False
return True
def handles_sanitizers(self):
if self.buildsystem != BuildSystem.cmake:
return False
return (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux' and self.llvm_branch == LLVM_MAIN)
def sanitizer_preset(self):
if self.handles_sanitizers():
if self.sanitizer == 'asan':
return 'linux-x64-asan'
if self.sanitizer == 'fuzzer':
return 'linux-x64-fuzzer'
return None
def handles_riscv(self):
return self.llvm_branch not in [LLVM_RELEASE_15]
def handles_hexagon(self):
return (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux' and self.llvm_branch == LLVM_MAIN)
def handles_wasm(self):
is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os ==
'linux')
return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os ==
'osx')
def handles_wasm_wabt(self):
return self.handles_wasm()
def handles_wasm_v8(self):
return self.handles_wasm() and self.os == 'linux'
def has_nvidia(self):
return self.arch == 'x86' and self.bits == 64 and self.os in ['windows'
, 'linux']
def handles_vulkan(self):
return False
def handles_webgpu(self):
return self.os == 'osx' and self.halide_branch not in [
HALIDE_RELEASE_15]
def has_tflite(self):
if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':
return True
if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':
return True
return False
def has_ccache(self):
return self.os in ['osx', 'linux']
def halide_target(self):
return '%s-%d-%s' % (self.arch, self.bits, self.os)
def llvm_builder_label(self):
return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.
major, self.halide_target())
def halide_builder_label(self):
a = ['halide']
if self.sanitizer:
a.append(self.sanitizer)
if self.purpose == Purpose.halide_testbranch:
a.append('testbranch')
elif self.purpose == Purpose.halide_nightly:
a.append('nightly')
a.append(self.halide_branch)
if self.halide_branch == HALIDE_MAIN:
a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')
a.append(self.halide_target())
a.append(self.buildsystem.name)
return '-'.join(a)
def builder_label(self):
if self.purpose == Purpose.llvm_nightly:
return self.llvm_builder_label()
else:
return self.halide_builder_label()
def builder_tags(self):
return self.builder_label().split('-')
def get_worker_names(self):
return [n for n, cfg in _WORKERS if self.arch == cfg.arch and self.
bits in cfg.bits and self.os == cfg.os]
def __str__(self):
return self.halide_target()
def get_builddir_subpath(subpath):
return Transform(lambda x: x.replace('\\', '/'), Interpolate(
f'%(prop:builddir)s/{subpath}'))
def get_llvm_toolchains_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-toolchains', *subpaths))
def get_llvm_source_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-project', *subpaths))
def get_llvm_build_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-build', *subpaths))
def get_llvm_install_path(builder_type, *subpaths):
llvm_workdir = builder_type.llvm_builder_label()
return get_builddir_subpath(os.path.join('..', llvm_workdir,
'llvm-install', *subpaths))
def get_halide_source_path(*subpaths):
return get_builddir_subpath(os.path.join('halide-source', *subpaths))
<|reserved_special_token_0|>
def get_halide_install_path(builder_type, *subpaths):
s = 'halide-install'
if builder_type.sanitizer:
s += '-' + builder_type.sanitizer
return get_builddir_subpath(os.path.join(s, *subpaths))
def add_get_halide_source_steps(factory, builder_type):
factory.addStep(GitHub(name='Get Halide source', locks=[
performance_lock.access('counting')], codebase='halide', workdir=
get_halide_source_path(), repourl=
'https://github.com/halide/Halide.git', branch=HALIDE_BRANCHES[
builder_type.halide_branch].ref, mode='incremental'))
def add_get_llvm_source_steps(factory, builder_type):
factory.addStep(Git(name=
f'Get LLVM {LLVM_BRANCHES[builder_type.llvm_branch].version.major}',
locks=[performance_lock.access('counting')], codebase='llvm',
workdir=get_llvm_source_path(), repourl=
'https://github.com/llvm/llvm-project.git', branch=LLVM_BRANCHES[
builder_type.llvm_branch].ref, mode='incremental'))
toolchains_dir = get_llvm_toolchains_path()
factory.addStep(MakeDirectory(name='Make CMake toolchain directory',
locks=[performance_lock.access('counting')], dir=toolchains_dir,
haltOnFailure=False))
factory.addStep(FileDownload(name='Download CMake toolchains',
mastersrc='toolchain.linux-arm32.cmake', workerdest=
'toolchain.linux-arm32.cmake', workdir=toolchains_dir, locks=[
performance_lock.access('counting')], haltOnFailure=True, mode=420))
<|reserved_special_token_0|>
def get_msvc_config_steps(factory, builder_type):
arch_for_bits = {(32): 'x64_x86', (64): 'x64'}
vcvarsall = 'vcvarsall.bat %s && set' % arch_for_bits[builder_type.bits]
vcvarsdir = (
'C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Auxiliary/Build'
)
def save_interesting_env_vars(rc, stdout, stderr):
d = {}
for line in stdout.split('\n'):
match = re.match('^([a-zA-Z0-9_-]+)=(.*)$', line.strip())
if match:
key = match.group(1).upper()
value = match.group(2)
if key in VCVARSALL_ENV_VARS:
d[key] = value
return {'env': d}
factory.addStep(SetPropertyFromCommand(name='Run VcVarsAll',
description='Run VcVarsAll', workdir=vcvarsdir, locks=[
performance_lock.access('counting')], haltOnFailure=True, command=
vcvarsall, extract_fn=save_interesting_env_vars))
def merge_renderable(_base, _extn):
@renderer
@defer.inlineCallbacks
def render(props):
base = yield props.render(_base)
extn = yield props.render(_extn)
base.update(extn)
return base
return render
def get_distrib_name(_version, target, ext):
@renderer
@defer.inlineCallbacks
def render(props):
rev = props.getProperty('got_revision')['halide']
version = yield props.render(_version)
return os.path.join(ARTIFACTS_DIR,
f'Halide-{version}-{target}-{rev}.{ext}')
return render
def get_cmake_generator(builder_type):
return 'Ninja'
<|reserved_special_token_0|>
def get_halide_cmake_options(builder_type, build_dir):
options = []
if builder_type.sanitizer and builder_type.purpose != Purpose.llvm_nightly:
assert builder_type.handles_sanitizers()
options.append('--preset=%s' % builder_type.sanitizer_preset())
options += ['-B', build_dir]
return options
def get_ctest_options(builder_type, build_dir):
assert builder_type.purpose != Purpose.llvm_nightly
if builder_type.sanitizer:
assert builder_type.handles_sanitizers()
return {'build_config': builder_type.sanitizer_preset()}
else:
return {'build_config': 'Release'}
def get_halide_cmake_definitions(builder_type, halide_target='host',
wasm_jit='wabt'):
cmake_definitions = {'Clang_DIR': get_llvm_install_path(builder_type,
'lib/cmake/clang'), 'CMAKE_INSTALL_PREFIX': get_halide_install_path
(builder_type), 'Halide_TARGET': halide_target, 'LLD_DIR':
get_llvm_install_path(builder_type, 'lib/cmake/lld'), 'LLVM_DIR':
get_llvm_install_path(builder_type, 'lib/cmake/llvm'), 'LLVM_ROOT':
get_llvm_install_path(builder_type), 'WITH_PYTHON_BINDINGS': 'ON' if
builder_type.handles_python() else 'OFF', 'WITH_TEST_FUZZ': 'ON' if
builder_type.sanitizer == 'fuzzer' else 'OFF'}
if builder_type.sanitizer and builder_type.handles_sanitizers():
pass
else:
cmake_definitions['CMAKE_BUILD_TYPE'] = 'Release'
if builder_type.has_ccache() and not builder_type.sanitizer_preset():
cmake_definitions['Halide_CCACHE_BUILD'] = 'ON'
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path(
'cmake', 'toolchain.linux-arm32.cmake')
if builder_type.os == 'windows':
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
'%(prop:VCPKG_ROOT)s/scripts/buildsystems/vcpkg.cmake')
cmake_definitions['pybind11_DIR'] = Interpolate(
'%(prop:VIRTUAL_ENV)s/share/cmake/pybind11')
if 'wasm' in halide_target:
cmake_definitions['WITH_PYTHON_BINDINGS'] = 'OFF'
if builder_type.handles_wasm() and halide_target.startswith('wasm-'):
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
'%(prop:EMSDK)s/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake'
)
cmake_definitions['NODE_JS_EXECUTABLE'] = Property(
'HALIDE_NODE_JS_PATH')
if wasm_jit == 'v8':
cmake_definitions['WITH_WABT'] = 'OFF'
cmake_definitions['WITH_V8'] = 'ON'
            cmake_definitions['V8_INCLUDE_PATH'] = '/home/halidenightly/v8/v8/include'
            cmake_definitions['V8_LIB_PATH'] = \
                '/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a'
elif wasm_jit == 'wabt':
cmake_definitions['WITH_WABT'] = 'ON'
cmake_definitions['WITH_V8'] = 'OFF'
cmake_definitions['V8_INCLUDE_PATH'] = ''
cmake_definitions['V8_LIB_PATH'] = ''
else:
assert False, 'Unknown wasm jit ' + str(wasm_jit)
if builder_type.handles_webgpu() and 'webgpu' in halide_target:
cmake_definitions['WEBGPU_NODE_BINDINGS'] = Property(
'HL_WEBGPU_NODE_BINDINGS')
cmake_definitions['WEBGPU_NATIVE_LIB'] = Property(
'HL_WEBGPU_NATIVE_LIB')
if builder_type.handles_hexagon() and 'hvx' in halide_target:
cmake_definitions['Halide_BUILD_HEXAGON_REMOTE_RUNTIME'] = 'ON'
return cmake_definitions
def get_cmake_build_command(builder_type, build_dir, targets=None):
    cmd = ['ninja', '-C', build_dir, '-j', Property('WORKER_BUILD_PARALLELISM')]
if builder_type.os == 'windows':
cmd.append('-v')
if targets:
cmd.extend(targets)
return cmd
def get_llvm_cmake_definitions(builder_type):
    definitions = {
        'CMAKE_BUILD_TYPE': 'Release',
        'CMAKE_INSTALL_PREFIX': get_llvm_install_path(builder_type),
        'LLVM_BUILD_32_BITS': 'ON' if builder_type.bits == 32 else 'OFF',
        'LLVM_ENABLE_ASSERTIONS': 'ON',
        'LLVM_ENABLE_BINDINGS': 'OFF',
        'LLVM_ENABLE_CURL': 'OFF',
        'LLVM_ENABLE_DIA_SDK': 'OFF',
        'LLVM_ENABLE_HTTPLIB': 'OFF',
        'LLVM_ENABLE_IDE': 'OFF',
        'LLVM_ENABLE_LIBXML2': 'OFF',
        'LLVM_ENABLE_OCAMLDOC': 'OFF',
        'LLVM_ENABLE_RTTI': 'ON',
        'LLVM_ENABLE_TERMINFO': 'OFF',
        'LLVM_ENABLE_WARNINGS': 'OFF',
        'LLVM_ENABLE_ZLIB': 'ON',
        'LLVM_ENABLE_ZSTD': 'OFF',
        'LLVM_INCLUDE_BENCHMARKS': 'OFF',
        'LLVM_INCLUDE_EXAMPLES': 'OFF',
        'LLVM_INCLUDE_TESTS': 'OFF',
        'LLVM_TARGETS_TO_BUILD': 'X86;ARM;NVPTX;AArch64;Hexagon;PowerPC;WebAssembly'
    }
if builder_type.bits == 32:
definitions['CMAKE_FIND_ROOT_PATH_MODE_INCLUDE'] = 'ONLY'
definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'
definitions['CMAKE_FIND_ROOT_PATH_MODE_PACKAGE'] = 'ONLY'
definitions['CMAKE_FIND_ROOT_PATH_MODE_PROGRAM'] = 'NEVER'
if builder_type.handles_riscv():
definitions['LLVM_TARGETS_TO_BUILD'] += ';RISCV'
if builder_type.handles_sanitizers():
        definitions['LLVM_ENABLE_RUNTIMES'] = 'compiler-rt;libcxx;libcxxabi;libunwind'
definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld;clang-tools-extra'
else:
definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld'
if builder_type.os != 'windows':
definitions['CMAKE_CXX_FLAGS'] = '-Wno-psabi'
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
definitions['CMAKE_TOOLCHAIN_FILE'] = get_llvm_toolchains_path(
'toolchain.linux-arm32.cmake')
definitions['LLVM_TARGET_ARCH'] = 'ARM'
definitions['LLVM_DEFAULT_TARGET_TRIPLE'] = 'arm-linux-gnueabihf'
if (builder_type.arch == 'x86' and builder_type.bits == 32 and
builder_type.os == 'linux'):
definitions['CMAKE_FIND_ROOT_PATH'] = '/usr/lib/i386-linux-gnu'
definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'
if builder_type.os == 'osx':
definitions['LLVM_ENABLE_SUPPORT_XCODE_SIGNPOSTS'] = 'FORCE_OFF'
if builder_type.has_ccache():
definitions['LLVM_CCACHE_BUILD'] = 'ON'
return definitions
def extend_property(dict_name, **kwargs):
@renderer
def render(props):
table = props.getProperty(dict_name, default={})
table.update(kwargs)
return table
return render
def add_env_setup_step(factory, builder_type, enable_ccache=False):
if builder_type.os == 'windows':
get_msvc_config_steps(factory, builder_type)
cxx = 'c++'
cc = 'cc'
ld = 'ld'
if builder_type.os == 'linux':
cc = 'gcc-9'
cxx = 'g++-9'
ld = 'ld'
if builder_type.arch == 'x86' and builder_type.bits == 32:
cxx += ' -m32'
cc += ' -m32'
ld += ' -melf_i386'
elif builder_type.os == 'windows':
cxx = 'cl.exe'
cc = 'cl.exe'
if enable_ccache and builder_type.has_ccache():
cxx = 'ccache ' + cxx
cc = 'ccache ' + cc
env = {'CC': cc, 'CXX': cxx, 'LD': ld}
factory.addStep(SetPropertiesFromEnv(name='Read worker environment',
variables=['EMSDK', 'HALIDE_NODE_JS_PATH', 'HL_HEXAGON_TOOLS',
'HL_WEBGPU_NATIVE_LIB', 'HL_WEBGPU_NODE_BINDINGS',
'LD_LIBRARY_PATH', 'VIRTUAL_ENV', 'VCPKG_ROOT']))
vcpkg_root = Property('VCPKG_ROOT', default=None)
if builder_type.handles_hexagon():
hexagon_remote_bin = get_halide_build_path('src', 'runtime',
'hexagon_remote')
env['HL_HEXAGON_SIM_REMOTE'] = Transform(os.path.join,
hexagon_remote_bin, 'hexagon', 'bin', 'hexagon_sim_remote')
env['HL_HEXAGON_SIM_CYCLES'] = '1'
env['LD_LIBRARY_PATH'] = [hexagon_remote_bin, Interpolate(
'%(prop:HL_HEXAGON_TOOLS)s/lib/iss')]
env['HEXAGON_SDK_ROOT'] = Interpolate(
'%(prop:HL_HEXAGON_TOOLS)s/../../../..')
if builder_type.has_nvidia() and builder_type.handles_vulkan():
env['VK_INSTANCE_LAYERS'] = 'VK_LAYER_KHRONOS_validation'
if builder_type.os == 'osx':
env['METAL_DEVICE_WRAPPER_TYPE'] = '1'
if builder_type.os == 'windows':
vcpkg_root = Property('VCPKG_ROOT', default='C:/vcpkg')
env['VCPKG_ROOT'] = vcpkg_root
env['CUDA_CACHE_DISABLE'] = '1'
env['HL_DISABLE_WINDOWS_ABORT_DIALOG'] = '1'
    factory.addStep(SetProperties(name='Initialize environment',
                                  properties=dict(env=extend_property('env', **env),
                                                  VCPKG_ROOT=vcpkg_root)))
@renderer
def get_llvm_latest_commit(props):
build_dir = props.getProperty('builddir')
assert not isinstance(build_dir, dict)
build_dir = build_dir.replace('\\', '/')
return (
'cd %s/llvm-project && git log -1 > %s/llvm-install/llvm_latest_commit.txt'
% (build_dir, build_dir))
def add_halide_cmake_build_steps(factory, builder_type):
source_dir = get_halide_source_path()
build_dir = get_halide_build_path()
install_dir = get_halide_install_path(builder_type)
    factory.addStep(RemoveDirectory(name='Remove Halide Build Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=build_dir,
                                    haltOnFailure=False))
    factory.addStep(MakeDirectory(name='Make Halide Build Dir',
                                  locks=[performance_lock.access('counting')],
                                  dir=build_dir,
                                  haltOnFailure=False))
    factory.addStep(RemoveDirectory(name='Remove Halide Install Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=install_dir,
                                    haltOnFailure=False))
    factory.addStep(MakeDirectory(name='Make Halide Install Dir',
                                  locks=[performance_lock.access('counting')],
                                  dir=install_dir,
                                  haltOnFailure=False))
    factory.addStep(CMake(name='Configure Halide',
                          description='Configure Halide',
                          locks=[performance_lock.access('counting')],
                          haltOnFailure=True,
                          workdir=build_dir,
                          env=Property('env'),
                          path=source_dir,
                          generator=get_cmake_generator(builder_type),
                          definitions=get_halide_cmake_definitions(builder_type),
                          options=get_halide_cmake_options(builder_type, build_dir)))
    factory.addStep(ShellCommand(name='Build Halide',
                                 description='Build Halide',
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True,
                                 workdir=build_dir,
                                 env=Property('env'),
                                 command=get_cmake_build_command(builder_type, build_dir,
                                                                 targets=['all', 'install'])))
def add_halide_cmake_package_steps(factory, builder_type):
source_dir = get_halide_source_path()
target = builder_type.halide_target()
ext = 'zip' if builder_type.os == 'windows' else 'tar.gz'
    factory.addStep(SetPropertiesFromCMakeCache(name='Get Halide package version',
                                                workdir=get_halide_build_path(),
                                                props=['CMAKE_PROJECT_VERSION']))
    extra_env = dict(Clang_DIR=get_llvm_install_path(builder_type, 'lib/cmake/clang'),
                     LLD_DIR=get_llvm_install_path(builder_type, 'lib/cmake/lld'),
                     LLVM_DIR=get_llvm_install_path(builder_type, 'lib/cmake/llvm'),
                     Halide_VERSION=Property('CMAKE_PROJECT_VERSION'))
if builder_type.os == 'windows':
build_dir = get_halide_build_path('packaging_dir')
if builder_type.arch == 'arm':
arch = 'ARM' if builder_type.bits == 32 else 'ARM64'
else:
arch = 'Win32' if builder_type.bits == 32 else 'x64'
cmd = [get_halide_source_path('packaging/zip/package.bat'),
source_dir, build_dir, arch]
else:
build_dir = get_halide_build_path()
cmd = [get_halide_source_path('packaging/tgz/package.sh'),
source_dir, build_dir]
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
extra_env['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake',
'toolchain.linux-arm32.cmake')
    factory.addStep(ShellCommand(name='Package Halide',
                                 description='Package Halide',
                                 workdir=build_dir,
                                 env=extend_property('env', **extra_env),
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True,
                                 command=cmd))
    factory.addStep(FileUpload(name='Upload Halide package',
                               workersrc=Interpolate(
                                   f'Halide-%(prop:CMAKE_PROJECT_VERSION)s-{target}.{ext}'),
                               locks=[performance_lock.access('counting')],
                               haltOnFailure=True,
                               workdir=build_dir,
                               mode=0o644,
                               masterdest=get_distrib_name(Property('CMAKE_PROJECT_VERSION'),
                                                           target, ext)))
def pkg_version_and_target(path: Path):
match = re.match('^(.*)-[a-f0-9]+\\.(tar\\.gz|tgz|zip)', path.name)
return match.group(1) if match else None
    factory.addStep(CleanOldFiles(name='Clean old releases',
                                  workdir=ARTIFACTS_DIR,
                                  locks=[performance_lock.access('counting')],
                                  groupfn=pkg_version_and_target))
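# Illustrative sketch (not from the original config): the grouping key strips
# the trailing revision hash and extension, so a hypothetical artifact named
# 'Halide-16.0.6-x86-64-linux-abcdef0.tar.gz' groups under
# 'Halide-16.0.6-x86-64-linux', letting CleanOldFiles age out older revisions
# of the same version/target.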
def get_gpu_dsp_targets(builder_type):
if builder_type.sanitizer_preset() is not None:
return
if builder_type.has_nvidia():
yield 'host-cuda', False
yield 'host-opencl', False
if builder_type.handles_vulkan():
yield 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13', False
if builder_type.handles_webgpu():
yield 'host-webgpu', False
if builder_type.os == 'osx':
yield 'host-metal', False
if builder_type.handles_hexagon():
yield 'host-hvx', True
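# Illustrative note (not from the original config): each yield above is a
# (halide_target, is_simulator) pair; e.g. a hypothetical x86-64-linux worker
# with an NVidia card yields ('host-cuda', False) and ('host-opencl', False),
# while 'host-hvx' is flagged True so the Hexagon simulator is excluded from
# performance testing below.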
def get_test_labels(builder_type):
targets = defaultdict(list)
preset = builder_type.sanitizer_preset()
if preset and 'fuzz' in preset:
targets['host'].extend(['fuzz'])
return targets
targets['host'].extend(['internal', 'correctness', 'generator',
'autoschedulers_cpu', 'error', 'warning', 'apps', 'performance',
'tutorial'])
if preset:
return targets
if (builder_type.arch == 'arm' and builder_type.bits == 32 and
builder_type.os == 'linux'):
targets['host'].remove('internal')
targets['host'].remove('generator')
if builder_type.handles_python():
targets['host'].extend(['python'])
if builder_type.arch == 'x86':
t = 'x86-%d-%s' % (builder_type.bits, builder_type.os)
targets[t].extend(['correctness'])
if builder_type.bits == 64:
targets['%s-sse41' % t].extend(['correctness'])
for t, is_simulator in get_gpu_dsp_targets(builder_type):
if t == 'host-webgpu':
targets[t].extend(['correctness', 'generator'])
else:
targets[t].extend(['correctness', 'generator', 'apps'])
if 'cuda' in t:
targets[t].extend(['autoschedulers_cuda'])
if 'hvx' not in t:
targets[t].extend(['autoschedulers_gpu'])
if not is_simulator:
targets[t].extend(['performance'])
if builder_type.has_nvidia():
targets['host-cuda-opencl'].extend(['correctness_multi_gpu'])
    if builder_type.handles_vulkan():
        targets['host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13'].extend(
            ['internal', 'correctness', 'generator', 'error', 'warning'])
    if builder_type.handles_wasm():
        if builder_type.handles_wasm_wabt():
            targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/wabt'].extend(
                ['internal', 'correctness', 'generator', 'error', 'warning'])
        if builder_type.handles_wasm_v8():
            targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/v8'].extend(
                ['internal', 'correctness', 'generator', 'error', 'warning'])
        targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int-wasm_threads'].extend(
            ['generator', 'apps'])
        if builder_type.handles_webgpu():
            targets['wasm-32-wasmrt-webgpu'].extend(['generator'])
return targets
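# Illustrative note (not from the original config): for a hypothetical
# x86-32-linux test builder (which skips the python/GPU/wasm branches above),
# this returns roughly
#   {'host': ['internal', 'correctness', 'generator', 'autoschedulers_cpu',
#             'error', 'warning', 'apps', 'performance', 'tutorial'],
#    'x86-32-linux': ['correctness']}
# i.e. keys are Halide target strings and values are ctest label lists.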
def is_time_critical_test(test):
    return test in ['performance', 'autoschedulers_cpu',
                    'autoschedulers_gpu', 'autoschedulers_cuda']
def short_target(halide_target):
s = halide_target.split('-')
if len(s) == 1:
return s[0]
elif len(s) == 2:
return '%s-%s' % (s[0], s[1])
elif len(s) == 3:
return '%s-%s-%s' % (s[0], s[1], s[2])
elif len(s) > 3:
return '%s-%s-%s…' % (s[0], s[1], s[2])
else:
return '<unknown>'
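# Illustrative examples (not from the original config):
#   short_target('host')               ~> 'host'
#   short_target('x86-64-linux')       ~> 'x86-64-linux'
#   short_target('x86-64-linux-sse41') ~> 'x86-64-linux…'
# Anything past the first three components is elided to keep step names short.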
def add_halide_cmake_test_steps(factory, builder_type):
parallelism = Property('WORKER_BUILD_PARALLELISM')
labels = get_test_labels(builder_type)
source_dir = get_halide_source_path()
build_dir = get_halide_build_path()
install_dir = get_halide_install_path(builder_type)
keys = list(labels.keys())
keys.remove('host')
keys.sort()
keys.insert(0, 'host')
for halide_target in keys:
env = extend_property('env', HL_JIT_TARGET=halide_target)
desc = 'T=%s' % short_target(halide_target)
test_labels = labels[halide_target]
wasm_jit = None
if halide_target.startswith('wasm-'):
halide_target, sep, wasm_jit = halide_target.partition('/')
env = extend_property('env', HL_JIT_TARGET=halide_target)
if wasm_jit:
desc = '%s + T=%s' % (wasm_jit, short_target(halide_target))
if not wasm_jit:
wasm_jit = 'wabt'
        factory.addStep(CMake(name='Reconfigure for %s' % short_target(halide_target),
                              description='Reconfigure for %s' % desc,
                              locks=[performance_lock.access('counting')],
                              haltOnFailure=True,
                              env=env,
                              workdir=build_dir,
                              path=source_dir,
                              generator=get_cmake_generator(builder_type),
                              definitions=get_halide_cmake_definitions(
                                  builder_type, halide_target=halide_target, wasm_jit=wasm_jit),
                              options=get_halide_cmake_options(builder_type, build_dir)))
        factory.addStep(ShellCommand(name='Rebuild for %s' % short_target(halide_target),
                                     description='Rebuild Halide for %s' % desc,
                                     locks=[performance_lock.access('counting')],
                                     haltOnFailure=True,
                                     workdir=build_dir,
                                     env=env,
                                     command=get_cmake_build_command(
                                         builder_type, build_dir, targets=['all', 'install'])))
do_apps = 'apps' in test_labels
if do_apps:
test_labels.remove('apps')
if not builder_type.handles_python():
if 'python' in test_labels:
test_labels.remove('python')
do_apps = False
        parallel_test_labels = [test for test in test_labels
                                if not is_time_critical_test(test)]
        exclusive_test_labels = [test for test in test_labels
                                 if is_time_critical_test(test)]
if parallel_test_labels:
if len(parallel_test_labels) > 2:
test_set = ','.join([s[0] for s in parallel_test_labels])
else:
test_set = ', '.join(parallel_test_labels)
exclude_tests = []
if builder_type.os == 'windows' or builder_type.os == 'linux':
exclude_tests.append('interpolate')
exclude_tests.append('lens_blur')
exclude_tests.append('unsharp')
if builder_type.os == 'linux' or builder_type.bits == 32:
exclude_tests.append('tutorial_lesson_12')
if builder_type.sanitizer == 'asan':
exclude_tests.append('tutorial_lesson_19')
if builder_type.arch == 'arm' or builder_type.bits == 32:
exclude_tests.append('tutorial_lesson_19')
            factory.addStep(CTest(name='Test %s %s' % (test_set, desc),
                                  description='Test %s %s' % (test_set, desc),
                                  locks=[performance_lock.access('counting')],
                                  workdir=build_dir,
                                  env=env,
                                  timeout=3600,
                                  labels=parallel_test_labels,
                                  exclude_tests=exclude_tests,
                                  jobs=parallelism,
                                  **get_ctest_options(builder_type, build_dir)))
        if exclusive_test_labels:
            test_set = ','.join([s.replace('autoschedulers_', 'a_')
                                 for s in exclusive_test_labels])
            factory.addStep(CTest(name='Test %s %s' % (test_set, desc),
                                  description='Test %s %s' % (test_set, desc),
                                  locks=[performance_lock.access('exclusive')],
                                  workdir=build_dir,
                                  env=env,
                                  timeout=3600,
                                  labels=exclusive_test_labels,
                                  **get_ctest_options(builder_type, build_dir)))
if do_apps:
apps_build_dir = get_halide_build_path('apps')
apps_source_dir = get_halide_source_path('apps')
apps_cmake_defs = get_halide_cmake_definitions(builder_type,
halide_target=halide_target)
apps_cmake_defs['CMAKE_PREFIX_PATH'] = get_halide_install_path(
builder_type)
            want_hannk = builder_type.has_tflite() and not halide_target.startswith('wasm-')
            apps_cmake_defs['ENABLE_APPS_HANNK'] = 'ON' if want_hannk else 'OFF'
            factory.addStep(CMake(name='Configure apps for %s' % desc,
                                  description='Configure apps for %s' % desc,
                                  locks=[performance_lock.access('counting')],
                                  haltOnFailure=True,
                                  env=env,
                                  workdir=apps_build_dir,
                                  path=apps_source_dir,
                                  generator=get_cmake_generator(builder_type),
                                  definitions=apps_cmake_defs,
                                  options=get_halide_cmake_options(builder_type, build_dir)))
            factory.addStep(ShellCommand(name='Build apps for %s' % desc,
                                         description='Build apps for %s' % desc,
                                         locks=[performance_lock.access('counting')],
                                         haltOnFailure=True,
                                         workdir=apps_build_dir,
                                         env=env,
                                         command=get_cmake_build_command(builder_type,
                                                                         apps_build_dir)))
            exclude_tests = []
            if builder_type.os == 'windows':
                exclude_tests.append('lens_blur_filter')
            factory.addStep(CTest(name='Test apps for %s' % desc,
                                  description='Test apps for %s' % desc,
                                  locks=[performance_lock.access('exclusive')],
                                  workdir=apps_build_dir,
                                  env=env,
                                  timeout=3600,
                                  exclude_tests=exclude_tests,
                                  exclude_labels=['slow_tests'],
                                  **get_ctest_options(builder_type, apps_build_dir)))
def create_halide_make_factory(builder_type):
assert builder_type.os != 'windows'
make_threads = Property('WORKER_BUILD_PARALLELISM')
build_dir = get_halide_build_path()
factory = BuildFactory()
add_env_setup_step(factory, builder_type, enable_ccache=True)
add_get_halide_source_steps(factory, builder_type)
factory.addStep(RemoveDirectory(name='Remove Halide Build Dir', locks=[
performance_lock.access('counting')], dir=build_dir))
target_label_pairs = [('host', 'build_tests')]
    for halide_target, labels_for_target in get_test_labels(builder_type).items():
if halide_target != 'host':
continue
_labels_to_skip = ['autoschedulers_cpu', 'autoschedulers_gpu',
'autoschedulers_cuda', 'performance', 'python']
if builder_type.bits == 32:
_labels_to_skip.extend(['autoschedulers_cpu',
'autoschedulers_gpu', 'autoschedulers_cuda'])
for label in labels_for_target:
if label in _labels_to_skip:
continue
target_label_pairs.append((halide_target, label))
for halide_target, label in target_label_pairs:
        env = extend_property('env',
                              LLVM_CONFIG=get_llvm_install_path(builder_type, 'bin/llvm-config'),
                              HL_TARGET=halide_target,
                              HL_JIT_TARGET=halide_target)
if is_time_critical_test(label):
p = 1
lock_mode = 'exclusive'
else:
p = make_threads
lock_mode = 'counting'
if label != 'build_tests':
label = 'test_%s' % label
        factory.addStep(ShellCommand(name='make ' + label,
                                     description=label + ' ' + halide_target,
                                     locks=[performance_lock.access(lock_mode)],
                                     workdir=build_dir,
                                     env=env,
                                     haltOnFailure=False,
                                     command=['make', '-f', get_halide_source_path('Makefile'),
                                              '-j', p, label],
                                     timeout=3600))
return factory
def create_halide_cmake_factory(builder_type):
factory = BuildFactory()
add_env_setup_step(factory, builder_type)
add_get_halide_source_steps(factory, builder_type)
add_halide_cmake_build_steps(factory, builder_type)
add_halide_cmake_test_steps(factory, builder_type)
if builder_type.purpose == Purpose.halide_nightly:
add_halide_cmake_package_steps(factory, builder_type)
return factory
def create_halide_builder(arch, bits, os, halide_branch, llvm_branch,
purpose, buildsystem=BuildSystem.cmake):
sanitizers = [None]
if purpose != Purpose.halide_nightly:
sanitizers.extend(_SANITIZERS)
for san in sanitizers:
builder_type = BuilderType(arch, bits, os, halide_branch,
llvm_branch, purpose, san, buildsystem)
if san and purpose == Purpose.llvm_nightly:
continue
if san and not builder_type.handles_sanitizers():
continue
workers = builder_type.get_worker_names()
        builder = BuilderConfig(name=builder_type.builder_label(),
                                workernames=workers,
                                factory=create_halide_factory(builder_type),
                                collapseRequests=True,
                                locks=[llvm_build_locks[llvm_branch + str(bits)].access('counting')],
                                tags=builder_type.builder_tags())
builder.builder_type = builder_type
yield builder
def create_halide_builders():
for arch, bits, os in get_interesting_halide_targets():
for halide_branch in HALIDE_NIGHTLIES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os,
halide_branch, llvm_branch, Purpose.halide_nightly)
for halide_branch in _HALIDE_RELEASES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os,
halide_branch, llvm_branch, Purpose.halide_testbranch)
yield from create_halide_builder(arch, bits, os, HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch)
yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 32, 'linux', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 64, 'osx', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('arm', 64, 'osx', HALIDE_MAIN,
LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)
    for llvm_branch in LLVM_BRANCHES:
        if abs(LLVM_BRANCHES[llvm_branch].version.major -
               LLVM_BRANCHES[LLVM_MAIN].version.major) in [1, 2]:
            yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,
                                             llvm_branch, Purpose.halide_testbranch)
def create_halide_scheduler(halide_branch):
def is_halide_base_branch(br):
return any(br == hl.ref for hl in HALIDE_BRANCHES.values())
def is_halide_pr_branch(br):
return not is_halide_base_branch(br)
def github_base_branch_matches(change):
ref = change.properties.getProperty('basename')
return ref == HALIDE_BRANCHES[halide_branch].ref
    builders = [b for b in c['builders'] if b.builder_type.halide_branch == halide_branch
                and b.builder_type.purpose == Purpose.halide_nightly]
    if builders:
        builder_names = [str(b.name) for b in builders]
        yield Nightly(name='halide-package-' + halide_branch,
                      codebases=['halide'],
                      builderNames=builder_names,
                      change_filter=ChangeFilter(codebase='halide'),
                      hour=4, minute=0)
        yield ForceScheduler(name='force-halide-nightly-' + halide_branch,
                             builderNames=builder_names,
                             codebases=['halide'])
    builders = [b for b in c['builders'] if b.builder_type.halide_branch == halide_branch
                and b.builder_type.purpose == Purpose.halide_testbranch]
    if builders:
        builder_names = [str(b.name) for b in builders]
        yield AnyBranchScheduler(name='halide-testbranch-' + halide_branch,
                                 codebases=['halide'],
                                 change_filter=ChangeFilter(category='pull',
                                                            codebase='halide',
                                                            branch_fn=is_halide_pr_branch,
                                                            filter_fn=github_base_branch_matches),
                                 treeStableTimer=60 * 5,
                                 builderNames=builder_names)
        yield ForceScheduler(name='force-halide-testbranch-' + halide_branch,
                             builderNames=builder_names,
                             codebases=['halide'])
def create_llvm_builders():
for arch, bits, os in get_interesting_halide_targets():
for llvm_branch in LLVM_BRANCHES:
builder_type = BuilderType(arch, bits, os, None, llvm_branch,
Purpose.llvm_nightly)
            for w in builder_type.get_worker_names():
                label = builder_type.llvm_builder_label()
                builder = BuilderConfig(name='%s/%s' % (label, w),
                                        workerbuilddir=label,
                                        workernames=[w],
                                        factory=create_llvm_cmake_factory(builder_type),
                                        collapseRequests=True,
                                        locks=[llvm_build_locks[llvm_branch + str(bits)].access('exclusive')],
                                        tags=builder_type.builder_tags())
                builder.builder_type = builder_type
                yield builder
def create_builders():
yield from create_llvm_builders()
yield from create_halide_builders()
def create_schedulers():
for llvm_branch in LLVM_BRANCHES:
yield from create_llvm_scheduler(llvm_branch)
for halide_branch in HALIDE_BRANCHES:
yield from create_halide_scheduler(halide_branch)
def prioritize_builders(buildmaster, builders):
def importance(builder):
builder_type = builder.config.builder_type
assert builder_type
if builder_type.purpose == Purpose.llvm_nightly:
return 0
if builder_type.purpose == Purpose.halide_testbranch:
return 1
if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_15]:
return 2
if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_16]:
return 2
if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_MAIN]:
return 3
return 4
return list(sorted(builders, key=importance))
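# Illustrative note (not from the original config): sorted() is ascending, so a
# smaller importance value runs earlier; e.g. llvm_nightly builders (0) are
# prioritized ahead of testbranch builders (1) and the release nightlies (2+).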
class SafeGitHubEventHandler(GitHubEventHandler):
def handle_push(self, payload, event):
ref = payload['ref']
if re.match('^refs/(heads|tags)/(master|main|release/\\d+\\.x)$', ref):
return super().handle_push(payload, event)
else:
            print(f'SafeGitHubEventHandler: ignoring push event for ref: {ref}')
            return self.skip()
def handle_pull_request(self, payload, event):
pr = payload['pull_request']
        try:
            if any(label['name'] == 'skip_buildbots' for label in pr['labels']):
                return self.skip()
            if any(r['login'] == 'halidebuildbots' for r in pr['requested_reviewers']):
                if payload['action'] == 'review_requested':
                    payload['action'] = 'synchronize'
                return super().handle_pull_request(payload, event)
            trusted_repos = ('halide/Halide', 'CodeLinaro/Halide')
            if pr['head']['repo']['full_name'] not in trusted_repos:
                return self.skip()
            return super().handle_pull_request(payload, event)
except KeyError as e:
print(f'SafeGitHubEventHandler: malformed payload: {payload}')
print(f'SafeGitHubEventHandler: missing key "{e}"')
return self.skip()
@staticmethod
def skip():
return [], 'git'
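    # Illustrative note (not from the original config): the ([], 'git') return
    # is the (changes, scm) pair the buildbot webhook dispatcher expects; an
    # empty change list means the event is acknowledged but schedules nothing.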
# -*- python -*-
# ex: set syntax=python:
# vim: set syntax=python:
import os
import re
from collections import defaultdict, namedtuple
from enum import Enum
from pathlib import Path
import buildbot.www.authz.endpointmatchers as ems
from buildbot.changes.filter import ChangeFilter
from buildbot.changes.gitpoller import GitPoller
from buildbot.config import BuilderConfig
from buildbot.locks import WorkerLock
from buildbot.process.factory import BuildFactory
from buildbot.process.properties import Interpolate, Property, renderer, Transform
from buildbot.reporters.generators.build import BuildStartEndStatusGenerator
from buildbot.reporters.github import GitHubStatusPush
from buildbot.reporters.message import MessageFormatterRenderable
from buildbot.schedulers.basic import AnyBranchScheduler
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.schedulers.timed import Nightly
from buildbot.steps.cmake import CMake
from buildbot.steps.master import SetProperties
from buildbot.steps.shell import SetPropertyFromCommand, ShellCommand
from buildbot.steps.source.git import Git
from buildbot.steps.source.github import GitHub
from buildbot.steps.transfer import FileUpload, FileDownload
from buildbot.steps.worker import MakeDirectory, SetPropertiesFromEnv, RemoveDirectory
from buildbot.worker import Worker
from buildbot.www.auth import UserPasswordAuth
from buildbot.www.authz import Authz
from buildbot.www.authz.roles import RolesFromUsername
from buildbot.www.hooks.github import GitHubEventHandler
from twisted.internet import defer
from custom_steps import CTest, CleanOldFiles, SetPropertiesFromCMakeCache
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
# SECRETS
GITHUB_TOKEN = Path('github_token.txt').read_text().strip()
WORKER_SECRET = Path('halide_bb_pass.txt').read_text().strip()
WEBHOOK_SECRET = Path('webhook_token.txt').read_text().strip()
WWW_PASSWORD = Path('buildbot_www_pass.txt').read_text().strip()
# SERVER SETTINGS
ARTIFACTS_DIR = os.environ.get('HALIDE_BB_MASTER_ARTIFACTS_DIR', '/home/halidenightly/artifacts')
REPO_DIR = Path(__file__, '..', '..').resolve()
# LLVM
# At any given time, we test (at least) 3 LLVM versions:
# - the current main (changes daily)
# - the most recent release (expected to be stable)
# - an older release (expected to be stable)
#
# The branches that correspond to these will rotate as new versions
# are released, but the underlying test logic should not need changing.
Version = namedtuple('Version', ['major', 'minor', 'patch'])
VersionedBranch = namedtuple('VersionedBranch', ['ref', 'version'])
LLVM_MAIN = 'main'
LLVM_RELEASE_17 = 'release_17'
LLVM_RELEASE_16 = 'release_16'
LLVM_RELEASE_15 = 'release_15'
LLVM_BRANCHES = {LLVM_MAIN: VersionedBranch(ref='main', version=Version(18, 0, 0)),
LLVM_RELEASE_17: VersionedBranch(ref='release/17.x', version=Version(17, 0, 0)),
LLVM_RELEASE_16: VersionedBranch(ref='llvmorg-16.0.6', version=Version(16, 0, 6)),
LLVM_RELEASE_15: VersionedBranch(ref='llvmorg-15.0.7', version=Version(15, 0, 7))}
# At any given time, Halide has a main branch, which supports (at least)
# the LLVM main branch and the most recent release branch (and maybe one older).
#
# We also support previous release branches; a release branch tracks *only* the
# corresponding version of LLVM (i.e., Halide 13 is 'release/13.x' and is only
# built/tested against LLVM13, even though it might still work with other LLVM versions).
#
# Note that we deliberately chose branch names that match LLVM's conventions.
#
# (Note that there are older releases of Halide that we no longer bother to build/test regularly.)
HALIDE_MAIN = 'main'
HALIDE_RELEASE_16 = 'release_16'
HALIDE_RELEASE_15 = 'release_15'
_HALIDE_RELEASES = [
HALIDE_RELEASE_16,
HALIDE_RELEASE_15,
]
HALIDE_BRANCHES = {HALIDE_MAIN: VersionedBranch(ref='main', version=Version(17, 0, 0)),
HALIDE_RELEASE_16: VersionedBranch(ref='release/16.x', version=Version(16, 0, 6)),
HALIDE_RELEASE_15: VersionedBranch(ref='release/15.x', version=Version(15, 0, 1))}
# This lists the Halide branch(es) for which we want to build nightlies;
# it's usually desirable to constrain these to save buildbot time (esp on the slower bots)
# and avoid branches that aren't changing much (i.e. -- recent releases that aren't
# likely to need new updates soon).
HALIDE_NIGHTLIES = [HALIDE_MAIN]
# Given a halide branch, return the 'native' llvm version we expect to use with it.
# For halide release branches, this is the corresponding llvm release branch; for
# halide main, it's llvm main.
LLVM_FOR_HALIDE = {
HALIDE_MAIN: [LLVM_MAIN, LLVM_RELEASE_17, LLVM_RELEASE_16],
HALIDE_RELEASE_16: [LLVM_RELEASE_16],
HALIDE_RELEASE_15: [LLVM_RELEASE_15],
}
# WORKERS
# Can use Python 3.7 dataclasses instead, if we choose to upgrade to that.
WorkerConfig = namedtuple('WorkerConfig', ['max_builds', 'j', 'arch', 'bits', 'os'])
# Using nproc+2 on the arm32 builds causes internal errors in gcc-armeabihf. Let's just use nproc.
_NPROC = Interpolate("%(worker:numcpus)s")
# For machines with max_builds=1, using nproc+2 cores for building is the conventional choice
# (and what ninja defaults to). Oddly, "ninja -j 0" means "use as many threads as you like" which
# is definitely not what we want.
_NPROC_PLUS_2 = Transform(lambda x: f'{int(x) + 2}', _NPROC)
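# Illustrative note (not from the original config): on a hypothetical 8-core
# worker, _NPROC renders to '8' and _NPROC_PLUS_2 to '10'; both are rendered
# lazily at build time, once the worker's numcpus property is known.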
_WORKERS = [
('linux-worker-1', WorkerConfig(max_builds=4, j=8, arch='x86', bits=[32, 64], os='linux')),
('linux-worker-4', WorkerConfig(max_builds=4, j=8, arch='x86', bits=[32, 64], os='linux')),
# 2013 Mac Pro running a 6-core Xeon.
('mac-x86-worker-1', WorkerConfig(max_builds=2, j=8, arch='x86', bits=[64], os='osx')),
# Mac Mini 2018, 3.2 GHz 6-Core Intel Core i7, 16GB memory
('mac-x86-worker-2', WorkerConfig(max_builds=2, j=8, arch='x86', bits=[64], os='osx')),
# Mac Mini 2018, ??? details TBD
('mac-x86-worker-3', WorkerConfig(max_builds=2, j=8, arch='x86', bits=[64], os='osx')),
('mac-arm-worker-1', WorkerConfig(max_builds=2, j=8, arch='arm', bits=[64], os='osx')),
# The arm-linux bots here have 4 cores but apparently don't have enough RAM to do more
# than -j=2 without crashing during LLVM builds.
('arm32-linux-worker-1', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[32], os='linux')),
('arm32-linux-worker-2', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[32], os='linux')),
('arm64-linux-worker-1', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[64], os='linux')),
('arm64-linux-worker-2', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[64], os='linux')),
# The rpi4 has 8GB ram, so apparently it's OK with -j=nproc for now.
('rpi4-linux-worker-1', WorkerConfig(max_builds=1, j=_NPROC, arch='arm', bits=[32], os='linux')),
# TODO: should normally be offline because every D3D12 test fails
('win-worker-2', WorkerConfig(max_builds=1, j=_NPROC_PLUS_2, arch='x86', bits=[32, 64], os='windows')),
# TODO: broken, pending repair till Monday
# ('win-worker-3', WorkerConfig(max_builds=2, j=_NPROC_PLUS_2, arch='x86', bits=[32, 64], os='windows')),
]
# The 'workers' list defines the set of recognized buildworkers. Each element is
# a Worker object, specifying a unique worker name and password. The same
# worker name and password must be configured on the worker.
c['workers'] = [Worker(n,
WORKER_SECRET,
keepalive_interval=300, # default is 3600 (1 hour). We'll do 5 mins.
max_builds=cfg.max_builds,
properties={'WORKER_BUILD_PARALLELISM': cfg.j}) for n, cfg in _WORKERS]
_SANITIZERS = [
'asan',
'fuzzer', # this isn't *technically* a sanitizer, but is close enough that it's a good fit
]
# LOCKS
# Performance testing requires exclusive use of a worker
# Compute-intensive build steps will grab this lock in reader
# mode. The performance test will grab it in exclusive mode.
performance_lock = WorkerLock("performance_lock", maxCount=9999)
# When building the LLVM nightlies, we can sync & build LLVM independently
# from other work, but when we update the install directory, we need to ensure
# we have an exclusive lock across the entire worker. (Since we have a small
# number of LLVM versions, and since 'make install' doesn't take very long,
# we could probably just get by with a single lock for *any* llvm install,
# but this isn't much harder to do.)
llvm_build_locks = {}
for llvm_branch, info in LLVM_BRANCHES.items():
for bits in [32, 64]:
llvm_build_locks[llvm_branch + str(bits)] = WorkerLock(
f'llvm_install_lock_{info.version.major}_{bits}', maxCount=9999)
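# Illustrative note (not from the original config): the lock keys concatenate
# branch name and bit-width, e.g. 'main32', 'main64', or 'release_1764', so
# each (LLVM branch, bits) pair gets its own install lock.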
# CHANGESOURCES
# Here we point the buildbot at third-party codebases, i.e., dependencies.
# Currently, we only have LLVM's `main` branch configured.
c['change_source'] = [
GitPoller(
repourl='https://github.com/llvm/llvm-project.git',
workdir='gitpoller-llvm-workdir',
branch=LLVM_BRANCHES[LLVM_MAIN].ref,
pollInterval=60 * 60 * 24, # Only check llvm once every 24 hours
pollAtLaunch=True)
]
# CODEBASES
all_repositories = {
'https://github.com/halide/Halide.git': 'halide',
'https://github.com/llvm/llvm-project.git': 'llvm',
}
def codebase_generator(chdict):
repo = chdict['repository']
assert repo in all_repositories, "Codebase not found for chdict: " + str(chdict)
return all_repositories[repo]
c['codebaseGenerator'] = codebase_generator
# BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which workers can execute them. Note that any particular build will
# only take place on one worker.
class Purpose(Enum):
halide_nightly = 1
halide_testbranch = 2
llvm_nightly = 3
class BuildSystem(Enum):
make = 0
cmake = 1
class BuilderType:
"""A class to encapsulate the settings for a specific Builder.
(Do not confuse with CMake's 'BUILD_TYPE', which is something else.)
It includes:
- Halide 'target' in the form of arch-bits-os
- LLVM branch to be used
- CMake vs Make
- halide-nightly vs halide-testbranch vs llvm-nightly
- sanitizers vs none
It doesn't currently include any 'features' because we don't currently
bake any in at build time.
It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),
mainly because we currently never test with multiple compilers for a given
setup. (If we ever need to do so, compiler should be added to this.)
"""
def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose, sanitizer=None,
buildsystem=BuildSystem.cmake):
assert arch in ['arm', 'x86']
assert bits in [32, 64]
assert os in ['linux', 'windows', 'osx']
assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'
self.arch = arch
self.bits = bits
self.os = os
self.halide_branch = halide_branch
self.llvm_branch = llvm_branch
self.buildsystem = buildsystem
self.purpose = purpose
self.sanitizer = sanitizer
if self.halide_branch:
assert self.purpose != Purpose.llvm_nightly
assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'
assert (self.purpose == Purpose.halide_testbranch or # if not testbranch...
self.llvm_branch in LLVM_FOR_HALIDE[self.halide_branch])
else:
assert self.purpose == Purpose.llvm_nightly
if self.sanitizer:
assert self.sanitizer in _SANITIZERS
# The armbots aren't configured with Python at all.
# We don't support the Python bindings on 32-bit at all.
def handles_python(self):
if self.bits == 32:
return False
if self.arch == 'arm' and self.os == 'linux':
return False
if self.sanitizer_preset() is not None:
return False
return True
def handles_sanitizers(self):
if self.buildsystem != BuildSystem.cmake:
return False
return (self.arch == 'x86'
and self.bits == 64
and self.os == 'linux'
and self.llvm_branch == LLVM_MAIN)
def sanitizer_preset(self):
if self.handles_sanitizers():
if self.sanitizer == 'asan':
return 'linux-x64-asan'
if self.sanitizer == 'fuzzer':
return 'linux-x64-fuzzer'
return None
def handles_riscv(self):
# Only support RISCV on LLVM16 or later.
return self.llvm_branch not in [LLVM_RELEASE_15]
def handles_hexagon(self):
return (self.arch == 'x86'
and self.bits == 64
and self.os == 'linux'
and self.llvm_branch == LLVM_MAIN)
def handles_wasm(self):
is_linux_x64 = (self.arch == 'x86'
and self.bits == 64
and self.os == 'linux')
return (self.llvm_branch == LLVM_MAIN
and (is_linux_x64 or self.os == 'osx'))
def handles_wasm_wabt(self):
return self.handles_wasm()
def handles_wasm_v8(self):
# OSX machines don't have V8 installed
return self.handles_wasm() and self.os == 'linux'
def has_nvidia(self):
return (self.arch == 'x86'
and self.bits == 64
and self.os in ['windows', 'linux'])
def handles_vulkan(self):
# TODO: disabled temporarily pending fixes to the Vulkan runtime
return False
# Stick with Linux on x86-64 for now. Others TBD.
# return (self.arch == 'x86'
# and self.bits == 64
# and self.os == 'linux'
# and self.halide_branch in [HALIDE_MAIN, HALIDE_RELEASE_16])
def handles_webgpu(self):
# At the moment, the WebGPU team recommends the OSX versions of Dawn/Node
# as the most robust for testing, so that's all we're set up to test with.
# (Note that 'Dawn' must be built/installed on the test machines manually;
# there are no binaries/prebuilts available at this time.)
return self.os == 'osx' and self.halide_branch not in [HALIDE_RELEASE_15]
def has_tflite(self):
if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':
return True
if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':
return True
return False
def has_ccache(self):
return self.os in ['osx', 'linux']
def halide_target(self):
return '%s-%d-%s' % (self.arch, self.bits, self.os)
def llvm_builder_label(self):
return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.major, self.halide_target())
def halide_builder_label(self):
# This currently tries to (somewhat) mimic the existing label pattern,
# but is arbitrary. (If changed, manual purging of buildbot temporaries
# is appropriate)
a = ['halide']
if self.sanitizer:
a.append(self.sanitizer)
if self.purpose == Purpose.halide_testbranch:
a.append('testbranch')
elif self.purpose == Purpose.halide_nightly:
a.append('nightly')
a.append(self.halide_branch)
if self.halide_branch == HALIDE_MAIN:
# Halide master is built against multiple LLVM versions,
# so append that here for clarity
a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')
a.append(self.halide_target())
a.append(self.buildsystem.name)
return '-'.join(a)
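    # Illustrative examples (not from the original config): the above yields
    # labels such as 'halide-nightly-main-llvm18-x86-64-linux-cmake' or, for a
    # sanitizer testbranch build,
    # 'halide-asan-testbranch-main-llvm18-x86-64-linux-cmake'.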
def builder_label(self):
if self.purpose == Purpose.llvm_nightly:
return self.llvm_builder_label()
else:
return self.halide_builder_label()
def builder_tags(self):
return self.builder_label().split('-')
def get_worker_names(self):
return [n for n, cfg in _WORKERS
if self.arch == cfg.arch and self.bits in cfg.bits and self.os == cfg.os]
def __str__(self):
return self.halide_target()
def get_builddir_subpath(subpath):
# Normalize paths to use forward slashes.
return Transform(lambda x: x.replace('\\', '/'), Interpolate(f'%(prop:builddir)s/{subpath}'))
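# Illustrative note (not from the original config): with a hypothetical worker
# builddir of 'C:\buildbot\halide-nightly', get_builddir_subpath('halide-build')
# renders to 'C:/buildbot/halide-nightly/halide-build' once buildbot evaluates
# the Interpolate/Transform pair.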
def get_llvm_toolchains_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-toolchains', *subpaths))
# TODO: make private to the LLVM code
def get_llvm_source_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-project', *subpaths))
# TODO: make private to the LLVM code
def get_llvm_build_path(*subpaths):
return get_builddir_subpath(os.path.join('llvm-build', *subpaths))
def get_llvm_install_path(builder_type, *subpaths):
# Note that `builder_type.purpose` can be a Halide builder or an LLVM builder;
# we want to ignore that aspect and produce the same effective path
# regardless (ie, based only on the other aspects of the builder_type).
llvm_workdir = builder_type.llvm_builder_label()
return get_builddir_subpath(os.path.join('..', llvm_workdir, 'llvm-install', *subpaths))
def get_halide_source_path(*subpaths):
return get_builddir_subpath(os.path.join('halide-source', *subpaths))
def get_halide_build_path(*subpaths):
return get_builddir_subpath(os.path.join('halide-build', *subpaths))
def get_halide_install_path(builder_type, *subpaths):
s = 'halide-install'
if builder_type.sanitizer:
s += '-' + builder_type.sanitizer
return get_builddir_subpath(os.path.join(s, *subpaths))
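# Illustrative note (not from the original config): a plain build installs to
# '<builddir>/halide-install', while e.g. an asan build uses
# '<builddir>/halide-install-asan', so sanitized and regular install trees
# never collide on the same worker.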
def add_get_halide_source_steps(factory, builder_type):
factory.addStep(GitHub(name='Get Halide source',
locks=[performance_lock.access('counting')],
codebase='halide',
workdir=get_halide_source_path(),
repourl='https://github.com/halide/Halide.git',
branch=HALIDE_BRANCHES[builder_type.halide_branch].ref,
mode='incremental'))
def add_get_llvm_source_steps(factory, builder_type):
factory.addStep(Git(name=f'Get LLVM {LLVM_BRANCHES[builder_type.llvm_branch].version.major}',
locks=[performance_lock.access('counting')],
codebase='llvm',
workdir=get_llvm_source_path(),
repourl='https://github.com/llvm/llvm-project.git',
branch=LLVM_BRANCHES[builder_type.llvm_branch].ref,
mode='incremental'))
# Always download the toolchains, even on platforms we don't need 'em
toolchains_dir = get_llvm_toolchains_path()
factory.addStep(MakeDirectory(name="Make CMake toolchain directory",
locks=[performance_lock.access('counting')],
dir=toolchains_dir,
haltOnFailure=False))
factory.addStep(FileDownload(name='Download CMake toolchains',
mastersrc='toolchain.linux-arm32.cmake', # relative to base dir
workerdest='toolchain.linux-arm32.cmake', # relative to workdir
workdir=toolchains_dir,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
mode=0o644))
# Determined by running `set` in cmd.exe before and after vcvarsall.bat
# and diffing the output. It's likely that we don't need all of these
# to make things work, but I haven't bothered to figure out what is irrelevant,
# so I'm erring on the side of maybe too much.
# noinspection SpellCheckingInspection
VCVARSALL_ENV_VARS = [
"COMMANDPROMPTTYPE",
"DEVENVDIR",
"EXTENSIONSDKDIR",
"FRAMEWORK40VERSION",
"FRAMEWORKDIR",
"FRAMEWORKDIR64",
"FRAMEWORKVERSION",
"FRAMEWORKVERSION64",
"INCLUDE",
"LIB",
"LIBPATH",
"NETFXSDKDIR",
"PATH",
"PLATFORM",
"UCRTVERSION",
"UNIVERSALCRTSDKDIR",
"VCIDEINSTALLDIR",
"VCINSTALLDIR",
"VCTOOLSINSTALLDIR",
"VCTOOLSREDISTDIR",
"VCTOOLSVERSION",
"VISUALSTUDIOVERSION",
"VS110COMNTOOLS",
"VS120COMNTOOLS",
"VS140COMNTOOLS",
"VS160COMNTOOLS",
"VSCMD_ARG_APP_PLAT",
"VSCMD_ARG_HOST_ARCH",
"VSCMD_ARG_TGT_ARCH",
"VSCMD_VER",
"VSINSTALLDIR",
"WINDOWSLIBPATH",
"WINDOWSSDKBINPATH",
"WINDOWSSDKDIR",
"WINDOWSSDKLIBVERSION",
"WINDOWSSDKVERBINPATH",
"WINDOWSSDKVERSION",
"WINDOWSSDK_EXECUTABLEPATH_X64",
"WINDOWSSDK_EXECUTABLEPATH_X86",
"__DOTNET_ADD_64BIT",
"__DOTNET_PREFERRED_BITNESS",
"__VSCMD_PREINIT_PATH",
"__VSCMD_SCRIPT_ERR_COUNT",
]
def get_msvc_config_steps(factory, builder_type):
# ensure that we use the x64 host compiler, not the x86 host compiler
arch_for_bits = {32: 'x64_x86', 64: 'x64'}
vcvarsall = 'vcvarsall.bat %s && set' % arch_for_bits[builder_type.bits]
# TODO: surely there is a better way of locating vcvarsall
# vcvarsdir = "c:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Auxiliary/Build"
vcvarsdir = "C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Auxiliary/Build"
    # `vcvarsall && set` dumps all the settings to stdout;
# we'll extract & save just the subset we think are likely to be relevant.
def save_interesting_env_vars(rc, stdout, stderr):
d = {}
for line in stdout.split('\n'):
match = re.match("^([a-zA-Z0-9_-]+)=(.*)$", line.strip())
if match:
key = match.group(1).upper()
value = match.group(2)
if key in VCVARSALL_ENV_VARS:
d[key] = value
return {'env': d}
factory.addStep(
SetPropertyFromCommand(name='Run VcVarsAll',
description='Run VcVarsAll',
workdir=vcvarsdir,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
command=vcvarsall,
extract_fn=save_interesting_env_vars))
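# Illustrative sketch (not from the original config): given vcvarsall output
# lines like
#   INCLUDE=C:\VC\include
#   PROMPT=$P$G
# save_interesting_env_vars returns {'env': {'INCLUDE': 'C:\\VC\\include'}};
# PROMPT is dropped because it is not listed in VCVARSALL_ENV_VARS, and the
# resulting dict becomes the 'env' build property via SetPropertyFromCommand.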
def merge_renderable(_base, _extn):
@renderer
@defer.inlineCallbacks
def render(props):
base = yield props.render(_base)
extn = yield props.render(_extn)
base.update(extn)
return base
return render
def get_distrib_name(_version, target, ext):
@renderer
@defer.inlineCallbacks
def render(props):
rev = props.getProperty('got_revision')['halide']
version = yield props.render(_version)
return os.path.join(ARTIFACTS_DIR, f'Halide-{version}-{target}-{rev}.{ext}')
return render
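# Illustrative example (not from the original config): for a hypothetical
# version '16.0.6', target 'x86-64-linux', and Halide revision 'abc123', this
# renders to os.path.join(ARTIFACTS_DIR, 'Halide-16.0.6-x86-64-linux-abc123.tar.gz').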
def get_cmake_generator(builder_type):
return 'Ninja'
def get_llvm_cmake_options(builder_type):
options = []
return options
def get_halide_cmake_options(builder_type, build_dir):
options = []
if builder_type.sanitizer and builder_type.purpose != Purpose.llvm_nightly:
assert builder_type.handles_sanitizers()
options.append("--preset=%s" % builder_type.sanitizer_preset())
# append *after* preset so we override the build dir
options += ['-B', build_dir]
return options
def get_ctest_options(builder_type, build_dir):
assert builder_type.purpose != Purpose.llvm_nightly
if builder_type.sanitizer:
assert builder_type.handles_sanitizers()
# No, this won't work, see https://gitlab.kitware.com/cmake/cmake/-/issues/23982 --
# fortunately, we don't need to specify the current sanitizer toolchains
# at test time (just at configure time).
# return {'preset': builder_type.sanitizer_preset(), 'test_dir': build_dir}
return {'build_config': builder_type.sanitizer_preset()}
else:
return {'build_config': 'Release'}
def get_halide_cmake_definitions(builder_type, halide_target='host', wasm_jit='wabt'):
cmake_definitions = {
'Clang_DIR': get_llvm_install_path(builder_type, 'lib/cmake/clang'),
'CMAKE_INSTALL_PREFIX': get_halide_install_path(builder_type),
'Halide_TARGET': halide_target,
'LLD_DIR': get_llvm_install_path(builder_type, 'lib/cmake/lld'),
'LLVM_DIR': get_llvm_install_path(builder_type, 'lib/cmake/llvm'),
'LLVM_ROOT': get_llvm_install_path(builder_type),
'WITH_PYTHON_BINDINGS': 'ON' if builder_type.handles_python() else 'OFF',
'WITH_TEST_FUZZ': 'ON' if builder_type.sanitizer == 'fuzzer' else 'OFF'
}
if builder_type.sanitizer and builder_type.handles_sanitizers():
pass
else:
cmake_definitions['CMAKE_BUILD_TYPE'] = 'Release'
# Sanitizer builds intermittently fail when using CCache for reasons that aren't
# clear ("precompiled header modified") -- for now, just ignore CCache for them
if builder_type.has_ccache() and not builder_type.sanitizer_preset():
cmake_definitions['Halide_CCACHE_BUILD'] = 'ON'
if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':
# Halide always uses its own toolchain files, from the cmake/ subdir.
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake', 'toolchain.linux-arm32.cmake')
if builder_type.os == 'windows':
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate('%(prop:VCPKG_ROOT)s/scripts/buildsystems/vcpkg.cmake')
        # CMake on Windows can't reliably find our pip-installed PyBind11 unless we point pybind11_DIR at it
cmake_definitions['pybind11_DIR'] = Interpolate('%(prop:VIRTUAL_ENV)s/share/cmake/pybind11')
# Don't bother with anything Python-related if we are targeting WebAssembly.
if "wasm" in halide_target:
cmake_definitions['WITH_PYTHON_BINDINGS'] = 'OFF'
# TODO: HALIDE_NODE_JS_PATH is only necessary until EMSDK updates their built-in version of Node
# to v16.13+; when that is done, remove this definition.
if builder_type.handles_wasm() and halide_target.startswith("wasm-"):
cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
'%(prop:EMSDK)s/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake')
cmake_definitions['NODE_JS_EXECUTABLE'] = Property('HALIDE_NODE_JS_PATH')
if wasm_jit == 'v8':
cmake_definitions['WITH_WABT'] = 'OFF'
cmake_definitions['WITH_V8'] = 'ON'
cmake_definitions['V8_INCLUDE_PATH'] = '/home/halidenightly/v8/v8/include'
cmake_definitions['V8_LIB_PATH'] = \
'/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a'
elif wasm_jit == 'wabt':
cmake_definitions['WITH_WABT'] = 'ON'
cmake_definitions['WITH_V8'] = 'OFF'
cmake_definitions['V8_INCLUDE_PATH'] = ''
cmake_definitions['V8_LIB_PATH'] = ''
else:
assert False, "Unknown wasm jit " + str(wasm_jit)
if builder_type.handles_webgpu() and "webgpu" in halide_target:
# TODO(srj): remove these after https://github.com/halide/Halide/pull/7422 lands
cmake_definitions['WEBGPU_NODE_BINDINGS'] = Property('HL_WEBGPU_NODE_BINDINGS')
cmake_definitions['WEBGPU_NATIVE_LIB'] = Property('HL_WEBGPU_NATIVE_LIB')
if builder_type.handles_hexagon() and 'hvx' in halide_target:
cmake_definitions['Halide_BUILD_HEXAGON_REMOTE_RUNTIME'] = 'ON'
return cmake_definitions
def get_cmake_build_command(builder_type, build_dir, targets=None):
cmd = ['ninja',
'-C', build_dir,
'-j', Property('WORKER_BUILD_PARALLELISM')]
# TODO(srj): for debugging apps/c_backend
if builder_type.os == "windows":
cmd.append('-v')
if targets:
cmd.extend(targets)
return cmd
def get_llvm_cmake_definitions(builder_type):
# Keep sorted!
definitions = {
'CMAKE_BUILD_TYPE': 'Release',
'CMAKE_INSTALL_PREFIX': get_llvm_install_path(builder_type),
'LLVM_BUILD_32_BITS': ('ON' if builder_type.bits == 32 else 'OFF'),
'LLVM_ENABLE_ASSERTIONS': 'ON',
'LLVM_ENABLE_BINDINGS': 'OFF',
'LLVM_ENABLE_CURL': 'OFF',
'LLVM_ENABLE_DIA_SDK': 'OFF',
'LLVM_ENABLE_HTTPLIB': 'OFF',
'LLVM_ENABLE_IDE': 'OFF',
'LLVM_ENABLE_LIBXML2': 'OFF',
'LLVM_ENABLE_OCAMLDOC': 'OFF',
'LLVM_ENABLE_RTTI': 'ON',
'LLVM_ENABLE_TERMINFO': 'OFF',
'LLVM_ENABLE_WARNINGS': 'OFF', # silence them, it's not like we're gonna fix them
'LLVM_ENABLE_ZLIB': 'ON',
'LLVM_ENABLE_ZSTD': 'OFF',
'LLVM_INCLUDE_BENCHMARKS': 'OFF',
'LLVM_INCLUDE_EXAMPLES': 'OFF',
'LLVM_INCLUDE_TESTS': 'OFF',
'LLVM_TARGETS_TO_BUILD': 'X86;ARM;NVPTX;AArch64;Hexagon;PowerPC;WebAssembly',
}
if builder_type.bits == 32:
definitions['CMAKE_FIND_ROOT_PATH_MODE_INCLUDE'] = "ONLY"
definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = "ONLY"
definitions['CMAKE_FIND_ROOT_PATH_MODE_PACKAGE'] = "ONLY"
definitions['CMAKE_FIND_ROOT_PATH_MODE_PROGRAM'] = "NEVER"
if builder_type.handles_riscv():
definitions['LLVM_TARGETS_TO_BUILD'] += ";RISCV"
if builder_type.handles_sanitizers():
definitions['LLVM_ENABLE_RUNTIMES'] = "compiler-rt;libcxx;libcxxabi;libunwind"
# We only need clang-tools-extra if building for sanitizers -- skip them
# if the builder will never do this, to save time & space.
definitions['LLVM_ENABLE_PROJECTS'] = "clang;lld;clang-tools-extra"
else:
definitions['LLVM_ENABLE_PROJECTS'] = "clang;lld"
# Some versions of GCC will flood the output with useless warnings about
# "parameter passing for argument of type foo changed in GCC 7.1" unless
# we disable this warning. This isn't *essential*, but it makes looking at the
# LLVM build logs much less noisy.
if builder_type.os != 'windows':
definitions['CMAKE_CXX_FLAGS'] = '-Wno-psabi'
if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':
# LLVM doesn't provide a toolchain file, and we can't/don't-want-to rely on the
# one from Halide, so we'll rely on one that the buildbot downloads to each worker.
# (Note that this assumes the file has been properly downloaded.)
definitions['CMAKE_TOOLCHAIN_FILE'] = get_llvm_toolchains_path('toolchain.linux-arm32.cmake')
definitions['LLVM_TARGET_ARCH'] = 'ARM'
definitions['LLVM_DEFAULT_TARGET_TRIPLE'] = 'arm-linux-gnueabihf'
if builder_type.arch == 'x86' and builder_type.bits == 32 and builder_type.os == 'linux':
definitions['CMAKE_FIND_ROOT_PATH'] = '/usr/lib/i386-linux-gnu'
definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'
# This disables an XCode setting that can get enabled by default
# when assertions are enabled, but only if your XCode install has
# certain frameworks installed; we want it disabled, as it prevents
# prebuilt libraries from working properly with XCode 9.x.
if builder_type.os == 'osx':
definitions['LLVM_ENABLE_SUPPORT_XCODE_SIGNPOSTS'] = 'FORCE_OFF'
# We never build LLVM with sanitizers enabled
if builder_type.has_ccache():
definitions['LLVM_CCACHE_BUILD'] = 'ON'
return definitions
def extend_property(dict_name, **kwargs):
@renderer
def render(props):
table = props.getProperty(dict_name, default={})
table.update(kwargs)
return table
return render
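# Illustrative sketch (not from the original config): if the 'env' property
# currently holds {'CC': 'cl.exe'}, then extend_property('env', HL_JIT_TARGET='host')
# renders to {'CC': 'cl.exe', 'HL_JIT_TARGET': 'host'} at step execution time;
# this is how per-step variables are layered onto the worker environment below.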
def add_env_setup_step(factory, builder_type, enable_ccache=False):
if builder_type.os == 'windows':
# do this first because the SetPropertyFromCommand step isn't smart enough to merge
get_msvc_config_steps(factory, builder_type)
cxx = 'c++'
cc = 'cc'
ld = 'ld'
if builder_type.os == 'linux':
cc = 'gcc-9'
cxx = 'g++-9'
ld = 'ld'
if builder_type.arch == 'x86' and builder_type.bits == 32:
cxx += ' -m32'
cc += ' -m32'
ld += ' -melf_i386'
elif builder_type.os == 'windows':
cxx = 'cl.exe'
cc = 'cl.exe'
# This is only necessary (or desirable) for make-based builds of Halide;
# CMake-based builds handle it via Halide_CCACHE_BUILD and/or LLVM_CCACHE_BUILD
if enable_ccache and builder_type.has_ccache():
cxx = 'ccache ' + cxx
cc = 'ccache ' + cc
env = {
'CC': cc,
'CXX': cxx,
'LD': ld,
}
# TODO: HALIDE_NODE_JS_PATH is only necessary until EMSDK updates their built-in version of Node
# to v16.13+; when that is done, remove HALIDE_NODE_JS_PATH here and on the workers.
factory.addStep(SetPropertiesFromEnv(name='Read worker environment',
variables=['EMSDK',
'HALIDE_NODE_JS_PATH',
'HL_HEXAGON_TOOLS',
'HL_WEBGPU_NATIVE_LIB',
'HL_WEBGPU_NODE_BINDINGS',
'LD_LIBRARY_PATH',
'VIRTUAL_ENV',
'VCPKG_ROOT']))
vcpkg_root = Property('VCPKG_ROOT', default=None)
if builder_type.handles_hexagon():
# Environment variables for testing Hexagon DSP
hexagon_remote_bin = get_halide_build_path('src', 'runtime', 'hexagon_remote')
# Assume that HL_HEXAGON_TOOLS points to the correct directory (it might not be /usr/local/hexagon)
env['HL_HEXAGON_SIM_REMOTE'] = Transform(os.path.join,
hexagon_remote_bin,
'hexagon',
'bin',
'hexagon_sim_remote')
env['HL_HEXAGON_SIM_CYCLES'] = '1'
env['LD_LIBRARY_PATH'] = [
# no, this will cause a failure at runtime if LD_LIBRARY_PATH is unset (or empty!)
# Property('LD_LIBRARY_PATH'),
hexagon_remote_bin,
Interpolate('%(prop:HL_HEXAGON_TOOLS)s/lib/iss'),
]
env['HEXAGON_SDK_ROOT'] = Interpolate('%(prop:HL_HEXAGON_TOOLS)s/../../../..')
# Force Vulkan validation layer on to catch any driver related errors
# ... this enables a suite of diagnostic checks implemented in the Vulkan SDK
# that verifies the driver and application conform to the Vulkan runtime
# specification. This should not be enabled in production due to the overhead,
# but we want to catch any changes in driver behaviour and/or spurious errors that
# may be hard to find (but easy to fix if the right error messages are present)
if builder_type.has_nvidia() and builder_type.handles_vulkan():
env['VK_INSTANCE_LAYERS'] = "VK_LAYER_KHRONOS_validation"
if builder_type.os == 'osx':
# Environment variable for turning on Metal API validation
# This will have no effect on CPU testing, just Metal testing
env['METAL_DEVICE_WRAPPER_TYPE'] = '1'
if builder_type.os == 'windows':
vcpkg_root = Property('VCPKG_ROOT', default='C:/vcpkg')
env['VCPKG_ROOT'] = vcpkg_root
# Current NVidia drivers on our Windows buildbots can corrupt their own
# cache, leading to many spurious failures. Disable the cache
# for now, pending NVidia investigation.
env['CUDA_CACHE_DISABLE'] = '1'
# We don't ever want an Abort, Retry, Ignore dialog in our tests
env['HL_DISABLE_WINDOWS_ABORT_DIALOG'] = '1'
# Leaving this here (but commented out) in case we need to temporarily
# disable leak-checking in the future.
#
# if builder_type.handles_sanitizers():
# # Disable leak-checking (for now) for ASAN builds
# env['ASAN_OPTIONS'] = 'detect_leaks=0'
factory.addStep(SetProperties(
name='Initialize environment',
properties=dict(
env=extend_property('env', **env),
VCPKG_ROOT=vcpkg_root)))
@renderer
def get_llvm_latest_commit(props):
# Note that this property is a dict for multi-codebase builds,
# but just a string for single-codebase builds.
build_dir = props.getProperty('builddir')
assert not isinstance(build_dir, dict)
build_dir = build_dir.replace('\\', '/')
# Can't use got_revision here since we may be using git directly.
return "cd %s/llvm-project && git log -1 > %s/llvm-install/llvm_latest_commit.txt" % (build_dir, build_dir)
def add_llvm_steps(factory, builder_type, clean_rebuild):
build_dir = get_llvm_build_path()
install_dir = get_llvm_install_path(builder_type)
llvm_name = str(LLVM_BRANCHES[builder_type.llvm_branch].version.major)
if clean_rebuild:
factory.addStep(RemoveDirectory(name="Remove LLVM %s Build Dir" % llvm_name,
locks=[performance_lock.access('counting')],
dir=build_dir,
haltOnFailure=False))
factory.addStep(RemoveDirectory(name="Remove LLVM %s Install Dir" % llvm_name,
locks=[performance_lock.access('counting')],
dir=install_dir,
haltOnFailure=False))
factory.addStep(MakeDirectory(name="Make LLVM %s Build Dir" % llvm_name,
locks=[performance_lock.access('counting')],
dir=build_dir,
haltOnFailure=False))
factory.addStep(MakeDirectory(name="Make LLVM %s Install Dir" % llvm_name,
locks=[performance_lock.access('counting')],
dir=install_dir,
haltOnFailure=False))
factory.addStep(
CMake(name='Configure LLVM %s' % llvm_name,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
env=Property('env'),
workdir=build_dir,
path=get_llvm_source_path('llvm'),
generator=get_cmake_generator(builder_type),
definitions=get_llvm_cmake_definitions(builder_type),
options=get_llvm_cmake_options(builder_type)))
factory.addStep(
ShellCommand(name='Build LLVM %s' % llvm_name,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
workdir=build_dir,
env=Property('env'),
command=get_cmake_build_command(builder_type, build_dir, targets=['install'])))
    # Save the most recent commit info for LLVM's head rev (via `git log -1`)
    # into ${INSTALL}/llvm_latest_commit.txt, just to make debugging simpler
    #
factory.addStep(
ShellCommand(name='Stamp Install Directory for LLVM %s' % llvm_name,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
workdir=get_llvm_source_path(),
env=Property('env'),
command=get_llvm_latest_commit))
def add_halide_cmake_build_steps(factory, builder_type):
# Always do a clean build for Halide
source_dir = get_halide_source_path()
build_dir = get_halide_build_path()
install_dir = get_halide_install_path(builder_type)
factory.addStep(RemoveDirectory(name="Remove Halide Build Dir",
locks=[performance_lock.access('counting')],
dir=build_dir,
haltOnFailure=False))
factory.addStep(MakeDirectory(name="Make Halide Build Dir",
locks=[performance_lock.access('counting')],
dir=build_dir,
haltOnFailure=False))
factory.addStep(RemoveDirectory(name="Remove Halide Install Dir",
locks=[performance_lock.access('counting')],
dir=install_dir,
haltOnFailure=False))
factory.addStep(MakeDirectory(name="Make Halide Install Dir",
locks=[performance_lock.access('counting')],
dir=install_dir,
haltOnFailure=False))
factory.addStep(CMake(name='Configure Halide',
description='Configure Halide',
locks=[performance_lock.access('counting')],
haltOnFailure=True,
workdir=build_dir,
env=Property('env'),
path=source_dir,
generator=get_cmake_generator(builder_type),
definitions=get_halide_cmake_definitions(builder_type),
options=get_halide_cmake_options(builder_type, build_dir)))
factory.addStep(
ShellCommand(name='Build Halide',
description='Build Halide',
locks=[performance_lock.access('counting')],
haltOnFailure=True,
workdir=build_dir,
env=Property('env'),
command=get_cmake_build_command(builder_type, build_dir, targets=['all', 'install'])))
def add_halide_cmake_package_steps(factory, builder_type):
source_dir = get_halide_source_path()
target = builder_type.halide_target()
ext = 'zip' if builder_type.os == 'windows' else 'tar.gz'
factory.addStep(
SetPropertiesFromCMakeCache(
name='Get Halide package version',
workdir=get_halide_build_path(),
props=['CMAKE_PROJECT_VERSION']))
extra_env = dict(
Clang_DIR=get_llvm_install_path(builder_type, 'lib/cmake/clang'),
LLD_DIR=get_llvm_install_path(builder_type, 'lib/cmake/lld'),
LLVM_DIR=get_llvm_install_path(builder_type, 'lib/cmake/llvm'),
Halide_VERSION=Property('CMAKE_PROJECT_VERSION')
)
if builder_type.os == 'windows':
# TODO: on Windows, we can't use Ninja for packaging (as we do everywhere
# else in this cfg) due to a bug in CMake 3.18, so we must use MSBuild;
# that means we must use a different build directory entirely. To simplify the
# world, we make this a subdir of the real build dir (so it gets cleaned properly).
# https://github.com/halide/Halide/issues/5264
build_dir = get_halide_build_path("packaging_dir")
if builder_type.arch == 'arm':
arch = 'ARM' if builder_type.bits == 32 else 'ARM64'
else:
arch = 'Win32' if builder_type.bits == 32 else 'x64'
cmd = [get_halide_source_path('packaging/zip/package.bat'), source_dir, build_dir, arch]
else:
build_dir = get_halide_build_path()
cmd = [get_halide_source_path('packaging/tgz/package.sh'), source_dir, build_dir]
if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':
extra_env['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake', 'toolchain.linux-arm32.cmake')
factory.addStep(
ShellCommand(name='Package Halide',
description='Package Halide',
workdir=build_dir,
env=extend_property('env', **extra_env),
locks=[performance_lock.access('counting')],
haltOnFailure=True,
command=cmd))
factory.addStep(
FileUpload(name='Upload Halide package',
workersrc=Interpolate(f'Halide-%(prop:CMAKE_PROJECT_VERSION)s-{target}.{ext}'),
locks=[performance_lock.access('counting')],
haltOnFailure=True,
workdir=build_dir,
mode=0o644,
masterdest=get_distrib_name(Property('CMAKE_PROJECT_VERSION'), target, ext)))
def pkg_version_and_target(path: Path):
        # Archive names are formatted like: Halide-[version]-[arch]-[commit].[ext]
# This grabs "Halide-[version]-[arch]".
match = re.match(r'^(.*)-[a-f0-9]+\.(tar\.gz|tgz|zip)', path.name)
return match.group(1) if match else None
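    # e.g. 'Halide-16.0.0-x86-64-linux-0badf00d.tar.gz' (filename illustrative)
    # groups under 'Halide-16.0.0-x86-64-linux', so old releases are pruned
    # per version/target group.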
factory.addStep(CleanOldFiles(
name='Clean old releases',
workdir=ARTIFACTS_DIR,
locks=[performance_lock.access('counting')],
groupfn=pkg_version_and_target))
# Figure out which "non-cpu" (GPU, DSP, etc) targets this builder can handle.
# Yields (target, is_simulator) pairs.
def get_gpu_dsp_targets(builder_type):
if builder_type.sanitizer_preset() is not None:
return
if builder_type.has_nvidia():
yield 'host-cuda', False
yield 'host-opencl', False
# TODO: temporarily disabled because our only windows bot doesn't support it...
# if builder_type.os == 'windows':
# yield 'host-d3d12compute', False
# If we're running on a capable GPU, add all optional feature flags to the vulkan target
# which are required to get all the correctness tests to pass
if builder_type.handles_vulkan():
yield 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13', False
if builder_type.handles_webgpu():
yield 'host-webgpu', False
if builder_type.os == 'osx':
yield 'host-metal', False
if builder_type.handles_hexagon():
# All the buildbots use a simulator for HVX, so performance tests
# won't be useful
yield 'host-hvx', True
# Return a dict mapping halide-targets (keys) to lists of test-labels (values).
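# A representative (hypothetical) result for an x86-64-linux builder with an
# NVIDIA GPU might look like:
#   {'host': ['internal', 'correctness', 'generator', ..., 'python'],
#    'x86-64-linux': ['correctness'],
#    'x86-64-linux-sse41': ['correctness'],
#    'host-cuda': ['correctness', 'generator', 'apps', 'autoschedulers_cuda',
#                  'autoschedulers_gpu', 'performance'],
#    ...}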
def get_test_labels(builder_type):
targets = defaultdict(list)
preset = builder_type.sanitizer_preset()
# For the fuzz sanitizer, run only the fuzz tests
if preset and 'fuzz' in preset:
targets['host'].extend(['fuzz'])
return targets
targets['host'].extend(['internal', 'correctness', 'generator',
'autoschedulers_cpu', 'error', 'warning', 'apps', 'performance', 'tutorial'])
# For all other sanitizers (eg asan), don't bother with the gpu/etc tests.
if preset:
return targets
# TODO: some JIT+generator tests are failing on arm32; disable for now
# pending fixes (see https://github.com/halide/Halide/issues/4940)
if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':
targets['host'].remove('internal')
targets['host'].remove('generator')
if builder_type.handles_python():
targets['host'].extend(['python'])
# Test without SSE4.1 on all x86 systems
if builder_type.arch == 'x86':
t = 'x86-%d-%s' % (builder_type.bits, builder_type.os)
targets[t].extend(['correctness'])
# on x86-64, also test with SSE4.1 (but nothing else that 'host' might sniff)
if builder_type.bits == 64:
targets['%s-sse41' % t].extend(['correctness'])
# Test a subset of things on GPU/DSP targets, as appropriate
for t, is_simulator in get_gpu_dsp_targets(builder_type):
# TODO(https://github.com/halide/Halide/issues/7420): disable apps for host-gpu until the errors are resolved
if t == 'host-webgpu':
targets[t].extend(['correctness', 'generator'])
else:
targets[t].extend(['correctness', 'generator', 'apps'])
if 'cuda' in t:
targets[t].extend(['autoschedulers_cuda'])
if 'hvx' not in t:
targets[t].extend(['autoschedulers_gpu'])
# Don't do performance testing on simulators.
if not is_simulator:
targets[t].extend(['performance'])
# Handle this special case separately
if builder_type.has_nvidia():
targets['host-cuda-opencl'].extend(['correctness_multi_gpu'])
# If we're running on a capable GPU, add all optional feature flags to the vulkan target
# which are required to get all the correctness tests to pass
if builder_type.handles_vulkan():
targets['host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13'].extend(
['internal', 'correctness', 'generator', 'error', 'warning'])
if builder_type.handles_wasm():
if builder_type.handles_wasm_wabt():
# TODO: this is a horrid hack. For now, we want to test JIT with both WABT and V8.
# Add as a horrible wart on the target string.
targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/wabt'].extend(
['internal', 'correctness', 'generator', 'error', 'warning'])
if builder_type.handles_wasm_v8():
# TODO: this is a horrid hack. For now, we want to test JIT with both WABT and V8.
# Add as a horrible wart on the target string.
targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/v8'].extend(
['internal', 'correctness', 'generator', 'error', 'warning'])
        # WABT (and thus the wasm JIT) can't handle code built with wasm_threads yet,
# so only test Generator here
targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int-wasm_threads'].extend(
['generator', 'apps'])
if builder_type.handles_webgpu():
# Most apps can't handle wasm builds yet.
targets['wasm-32-wasmrt-webgpu'].extend(['generator'])
return targets
def is_time_critical_test(test):
# Return true if the test label (or single-test name) is 'time critical' and must
# be run with an exclusive lock on the buildbot (typically, performance tests)
return test in ['performance', 'autoschedulers_cpu', 'autoschedulers_gpu', 'autoschedulers_cuda']
def short_target(halide_target):
s = halide_target.split('-')
if len(s) == 1:
return s[0]
elif len(s) == 2:
return '%s-%s' % (s[0], s[1])
elif len(s) == 3:
return '%s-%s-%s' % (s[0], s[1], s[2])
elif len(s) > 3:
return '%s-%s-%s…' % (s[0], s[1], s[2])
else:
return '<unknown>'
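# e.g. short_target('host-cuda') == 'host-cuda', while
# short_target('host-vulkan-vk_int8-vk_int16') == 'host-vulkan-vk_int8…'.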
def add_halide_cmake_test_steps(factory, builder_type):
parallelism = Property('WORKER_BUILD_PARALLELISM')
labels = get_test_labels(builder_type)
source_dir = get_halide_source_path()
build_dir = get_halide_build_path()
install_dir = get_halide_install_path(builder_type) # NOQA
# Since we need to do at least a partial rebuild for each different target,
# we want to group things by target. Do host first, followed by a key-sorted
# order, to ensure predictability.
keys = list(labels.keys())
keys.remove('host')
keys.sort()
keys.insert(0, 'host')
for halide_target in keys:
# HL_TARGET is now ignored by CMake builds, no need to set
# (must specify -DHalide_TARGET to CMake instead)
# env['HL_TARGET'] = halide_target
env = extend_property('env', HL_JIT_TARGET=halide_target)
desc = 'T=%s' % short_target(halide_target)
# Do this *before* splitting the horrid wasm-specific target string
test_labels = labels[halide_target]
# wasm targets must ensure that the EMSDK (emcc, etc) are added to the
# active env.
wasm_jit = None
if halide_target.startswith("wasm-"):
halide_target, sep, wasm_jit = halide_target.partition('/')
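            # e.g. 'wasm-32-wasmrt-.../v8' splits into the real Halide target
            # (everything before the '/') and wasm_jit == 'v8'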
# Re-set HL_JIT_TARGET with the de-warted target string
env = extend_property('env', HL_JIT_TARGET=halide_target)
if wasm_jit:
desc = '%s + T=%s' % (wasm_jit, short_target(halide_target))
if not wasm_jit:
wasm_jit = 'wabt'
factory.addStep(
CMake(name='Reconfigure for %s' % short_target(halide_target),
description='Reconfigure for %s' % desc,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
env=env,
workdir=build_dir,
path=source_dir,
generator=get_cmake_generator(builder_type),
definitions=get_halide_cmake_definitions(
builder_type, halide_target=halide_target, wasm_jit=wasm_jit),
options=get_halide_cmake_options(builder_type, build_dir)))
factory.addStep(
ShellCommand(name='Rebuild for %s' % (short_target(halide_target)),
description='Rebuild Halide for %s' % desc,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
workdir=build_dir,
env=env,
command=get_cmake_build_command(builder_type, build_dir, targets=['all', 'install'])))
do_apps = 'apps' in test_labels
if do_apps:
test_labels.remove('apps')
if not builder_type.handles_python():
if 'python' in test_labels:
test_labels.remove('python')
            # TODO: some of the apps require Python, so we must skip them for now as well
do_apps = False
parallel_test_labels = [
test for test in test_labels if not is_time_critical_test(test)]
exclusive_test_labels = [test for test in test_labels if is_time_critical_test(test)]
if parallel_test_labels:
if len(parallel_test_labels) > 2:
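                # With many labels, abbreviate each to its first letter to keep the step name short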
test_set = ','.join([s[0] for s in parallel_test_labels])
else:
test_set = ', '.join(parallel_test_labels)
# Build up some special cases to exclude
exclude_tests = []
            if builder_type.os == 'windows' or builder_type.os == 'linux':
                # TODO: disable lens_blur on windows for now due to
                # https://bugs.llvm.org/show_bug.cgi?id=46176;
                # also exclude these on both Windows and Linux because those
                # testbots have inadequate GPU RAM
                exclude_tests.append('interpolate')
                exclude_tests.append('lens_blur')
                exclude_tests.append('unsharp')
if builder_type.os == 'linux' or builder_type.bits == 32:
# TODO: disable tutorial_lesson_12_using_the_gpu (both C++ and python) on linux and 32-bit
exclude_tests.append('tutorial_lesson_12')
if builder_type.sanitizer == 'asan':
# lesson 19 can trigger memory leaks in some of the GPU device drivers,
# so just exclude it when doing ASAN
exclude_tests.append('tutorial_lesson_19')
if builder_type.arch == 'arm' or builder_type.bits == 32:
# TODO: disable lesson_19 on arm32
# https://github.com/halide/Halide/issues/5224
exclude_tests.append('tutorial_lesson_19')
factory.addStep(
CTest(name='Test %s %s' % (test_set, desc),
description='Test %s %s' % (test_set, desc),
locks=[performance_lock.access('counting')],
workdir=build_dir,
env=env,
timeout=3600,
labels=parallel_test_labels,
exclude_tests=exclude_tests,
jobs=parallelism,
**get_ctest_options(builder_type, build_dir)))
if exclusive_test_labels:
test_set = ','.join([s.replace('autoschedulers_', 'a_') for s in exclusive_test_labels])
factory.addStep(
CTest(name='Test %s %s' % (test_set, desc),
description='Test %s %s' % (test_set, desc),
locks=[performance_lock.access('exclusive')],
workdir=build_dir,
env=env,
timeout=3600,
labels=exclusive_test_labels,
**get_ctest_options(builder_type, build_dir)))
if do_apps:
apps_build_dir = get_halide_build_path("apps")
apps_source_dir = get_halide_source_path("apps")
# We currently don't attempt to build any of the apps with wasm
apps_cmake_defs = get_halide_cmake_definitions(builder_type, halide_target=halide_target)
apps_cmake_defs['CMAKE_PREFIX_PATH'] = get_halide_install_path(builder_type)
# apps/hannk is expensive to build, and doesn't (yet) build on all systems, so special-case it here
want_hannk = (builder_type.has_tflite() and not halide_target.startswith("wasm-"))
apps_cmake_defs['ENABLE_APPS_HANNK'] = 'ON' if want_hannk else 'OFF'
factory.addStep(
CMake(name='Configure apps for %s' % desc,
description='Configure apps for %s' % desc,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
env=env,
workdir=apps_build_dir,
path=apps_source_dir,
generator=get_cmake_generator(builder_type),
definitions=apps_cmake_defs,
options=get_halide_cmake_options(builder_type, build_dir)))
factory.addStep(
ShellCommand(name='Build apps for %s' % desc,
description='Build apps for %s' % desc,
locks=[performance_lock.access('counting')],
haltOnFailure=True,
workdir=apps_build_dir,
env=env,
command=get_cmake_build_command(builder_type, apps_build_dir)))
# Note: do *not* run the apps/ tests in parallel; many of them expect
# to make full use of all cores, and running in parallel will just slow
# things down.
exclude_tests = []
if builder_type.os == 'windows':
# TODO: disable lens_blur_filter on windows for now due to
# https://github.com/halide/Halide/issues/5552
exclude_tests.append('lens_blur_filter')
factory.addStep(
CTest(name='Test apps for %s' % desc,
description='Test apps for %s' % desc,
locks=[performance_lock.access('exclusive')],
workdir=apps_build_dir,
env=env,
timeout=3600,
exclude_tests=exclude_tests,
exclude_labels=['slow_tests'],
**get_ctest_options(builder_type, apps_build_dir)))
def create_halide_make_factory(builder_type):
assert builder_type.os != 'windows'
make_threads = Property('WORKER_BUILD_PARALLELISM')
build_dir = get_halide_build_path()
factory = BuildFactory()
# We never enable sanitizers for Make builds here (only for CMake)
add_env_setup_step(factory, builder_type, enable_ccache=True)
# It's never necessary to use get_msvc_config_steps() for Make,
# since we never use Make with MSVC
add_get_halide_source_steps(factory, builder_type)
# Force a full rebuild of Halide every time
factory.addStep(RemoveDirectory(name="Remove Halide Build Dir",
locks=[performance_lock.access('counting')],
dir=build_dir))
target_label_pairs = [('host', 'build_tests')]
for halide_target, labels_for_target in get_test_labels(builder_type).items():
# For Make we skip every target that isn't plain 'host'
if halide_target != 'host':
continue
_labels_to_skip = [
            # autoschedulers and performance require exclusive machine access and aren't worth it for Make
"autoschedulers_cpu",
"autoschedulers_gpu",
"autoschedulers_cuda",
"performance",
# Make no longer provides support for building the Python bindings,
# regardless of builder_type.handles_python()
"python",
]
if builder_type.bits == 32:
# Don't test autoschedulers on 32-bit systems via Make;
# it's not set up 100% correctly for crosscompilation there
# and the CMake-based coverage is fine.
_labels_to_skip.extend(['autoschedulers_cpu', 'autoschedulers_gpu', 'autoschedulers_cuda'])
for label in labels_for_target:
if label in _labels_to_skip:
continue
target_label_pairs.append((halide_target, label))
for halide_target, label in target_label_pairs:
env = extend_property('env',
LLVM_CONFIG=get_llvm_install_path(builder_type, 'bin/llvm-config'),
HL_TARGET=halide_target,
HL_JIT_TARGET=halide_target)
if is_time_critical_test(label):
p = 1
lock_mode = 'exclusive'
else:
p = make_threads
lock_mode = 'counting'
if label != 'build_tests':
label = 'test_%s' % label
factory.addStep(ShellCommand(name='make ' + label,
description=label + ' ' + halide_target,
locks=[performance_lock.access(lock_mode)],
workdir=build_dir,
env=env,
haltOnFailure=False,
command=['make',
'-f', get_halide_source_path('Makefile'),
'-j', p,
label],
timeout=3600))
return factory
def create_halide_cmake_factory(builder_type):
factory = BuildFactory()
add_env_setup_step(factory, builder_type)
add_get_halide_source_steps(factory, builder_type)
add_halide_cmake_build_steps(factory, builder_type)
add_halide_cmake_test_steps(factory, builder_type)
# If everything else looks ok, build a distrib.
if builder_type.purpose == Purpose.halide_nightly:
add_halide_cmake_package_steps(factory, builder_type)
return factory
def create_halide_factory(builder_type):
if builder_type.buildsystem == BuildSystem.cmake:
return create_halide_cmake_factory(builder_type)
else:
return create_halide_make_factory(builder_type)
def get_interesting_halide_targets():
for arch in ['arm', 'x86']:
for bits in [32, 64]:
for os in ['linux', 'osx', 'windows']:
if arch == 'arm' and os == 'windows':
# No buildbots for windows-on-arm (yet)
continue
if os == 'osx' and bits != 64:
# osx is 64-bit only, period
continue
yield arch, bits, os
def create_halide_builder(arch, bits, os, halide_branch, llvm_branch, purpose, buildsystem=BuildSystem.cmake):
# Always do a build with no sanitizers
sanitizers = [None]
# Also build with sanitizers (but not if we are doing nightlies)
if purpose != Purpose.halide_nightly:
sanitizers.extend(_SANITIZERS)
for san in sanitizers:
builder_type = BuilderType(arch, bits, os, halide_branch, llvm_branch, purpose, san, buildsystem)
if san and purpose == Purpose.llvm_nightly:
continue
if san and not builder_type.handles_sanitizers():
continue
workers = builder_type.get_worker_names()
builder = BuilderConfig(name=builder_type.builder_label(),
workernames=workers,
factory=create_halide_factory(builder_type),
collapseRequests=True,
# We need counting access to our llvm branch during Halide builds.
# (We could probably get by with access during only a subset of
# our steps, but there doesn't appear to be a way to group
# lock requests across multiple-but-not-all-steps in a Build.)
locks=[llvm_build_locks[llvm_branch + str(bits)].access('counting')],
tags=builder_type.builder_tags())
builder.builder_type = builder_type
yield builder
def create_halide_builders():
for arch, bits, os in get_interesting_halide_targets():
# Create builders for build + package of Halide master + release branches
# (but only against their 'native' LLVM versions)
for halide_branch in HALIDE_NIGHTLIES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os, halide_branch, llvm_branch, Purpose.halide_nightly)
# Create the builders for testing pull requests to releases.
for halide_branch in _HALIDE_RELEASES:
for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
yield from create_halide_builder(arch, bits, os, halide_branch, llvm_branch, Purpose.halide_testbranch)
# Create the builders for testing pull requests to main.
yield from create_halide_builder(arch, bits, os, HALIDE_MAIN, LLVM_MAIN, Purpose.halide_testbranch)
# Also test Makefiles on x86-linux & osx (but only on Halide main) to ensure they
# stay healthy. (Note: deliberately skip arm-linux, since they are the slowest bots.)
yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN, LLVM_MAIN,
Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 32, 'linux', HALIDE_MAIN, LLVM_MAIN,
Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('x86', 64, 'osx', HALIDE_MAIN, LLVM_MAIN,
Purpose.halide_testbranch, BuildSystem.make)
yield from create_halide_builder('arm', 64, 'osx', HALIDE_MAIN, LLVM_MAIN,
Purpose.halide_testbranch, BuildSystem.make)
# Test pull requests for Halide master against the current and previous LLVM, for at least one target.
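    # e.g. (version numbers hypothetical) if LLVM_MAIN is 17, this adds extra
    # x86-64-linux testbranch builders against LLVM 15 and 16.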
for llvm_branch in LLVM_BRANCHES:
if abs(LLVM_BRANCHES[llvm_branch].version.major - LLVM_BRANCHES[LLVM_MAIN].version.major) in [1, 2]:
yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN, llvm_branch, Purpose.halide_testbranch)
def create_halide_scheduler(halide_branch):
def is_halide_base_branch(br):
return any(br == hl.ref for hl in HALIDE_BRANCHES.values())
def is_halide_pr_branch(br):
# If it's not one of the well-known branches, assume it's a pull request
return not is_halide_base_branch(br)
def github_base_branch_matches(change):
ref = change.properties.getProperty('basename')
return ref == HALIDE_BRANCHES[halide_branch].ref
# ----- nightlies
builders = [b for b in c['builders']
if b.builder_type.halide_branch == halide_branch and b.builder_type.purpose == Purpose.halide_nightly]
if builders:
builder_names = [str(b.name) for b in builders]
# Start the Halide nightlies at 9PM Pacific; our buildbot master uses UTC for
# cron, so that's 0400. Note that this is (deliberately) well before
# the LLVM nightlies get built (currently 11pm start); the idea is
# that Halide nightlies get built using the previous day's LLVM
# nightlies, on the assumption that those are more likely to get at
# least some test coverage (via testbranch) to minimize breakage.
yield Nightly(
name='halide-package-' + halide_branch,
codebases=['halide'],
builderNames=builder_names,
change_filter=ChangeFilter(codebase='halide'),
hour=4,
minute=0)
yield ForceScheduler(
name='force-halide-nightly-' + halide_branch,
builderNames=builder_names,
codebases=['halide'])
# ----- testbranch
builders = [b for b in c['builders']
if b.builder_type.halide_branch == halide_branch
and b.builder_type.purpose == Purpose.halide_testbranch]
if builders:
# NOT SingleBranchScheduler, because this can process changes from many branches (all PRs)
builder_names = [str(b.name) for b in builders]
yield AnyBranchScheduler(
name='halide-testbranch-' + halide_branch,
codebases=['halide'],
change_filter=ChangeFilter(category='pull', codebase='halide',
branch_fn=is_halide_pr_branch,
filter_fn=github_base_branch_matches),
treeStableTimer=60 * 5, # seconds
builderNames=builder_names)
yield ForceScheduler(
name='force-halide-testbranch-' + halide_branch,
builderNames=builder_names,
codebases=['halide'])
def create_llvm_cmake_factory(builder_type):
factory = BuildFactory()
add_env_setup_step(factory, builder_type)
add_get_llvm_source_steps(factory, builder_type)
clean_llvm_rebuild = (builder_type.llvm_branch == LLVM_MAIN)
add_llvm_steps(factory, builder_type, clean_llvm_rebuild)
return factory
def create_llvm_builders():
for arch, bits, os in get_interesting_halide_targets():
# Note that we want these Builders to run on *every* eligible worker;
# the goal is to ensure that all LLVM builds are updated locally
# on all of the workers.
for llvm_branch in LLVM_BRANCHES:
builder_type = BuilderType(arch, bits, os, None, llvm_branch, Purpose.llvm_nightly)
for w in builder_type.get_worker_names():
# Note that we need the builder name to be unique across workers,
# but we want the builddir on the *worker* side to be the same for all workers
# (to simplify things).
label = builder_type.llvm_builder_label()
builder = BuilderConfig(name="%s/%s" % (label, w),
workerbuilddir=label,
workernames=[w],
factory=create_llvm_cmake_factory(builder_type),
collapseRequests=True,
# We want exclusive access to this workerlock
# thru all this Builder's steps. (We could probably
# get by with holding it just during the install phase,
# but we'd have to finesse some details like removing
# the old install directory within the lock, and this
# is much simpler.)
locks=[llvm_build_locks[llvm_branch + str(bits)].access('exclusive')],
tags=builder_type.builder_tags())
builder.builder_type = builder_type
yield builder
def create_llvm_scheduler(llvm_branch):
builders = [str(b.name) for b in c['builders']
if b.builder_type.llvm_branch == llvm_branch and b.builder_type.purpose == Purpose.llvm_nightly]
    # Start every day at 11PM Pacific; our buildbot master uses UTC for cron, so that's 0600
yield Nightly(
name=f'llvm-nightly-{LLVM_BRANCHES[llvm_branch].version.major}',
codebases=['llvm'],
builderNames=builders,
change_filter=ChangeFilter(codebase='llvm'),
hour=6,
minute=0)
for b in builders:
yield ForceScheduler(
name='force-llvm-nightly-' + b.replace('/', '_'),
codebases=['llvm'],
builderNames=[b])
def create_builders():
yield from create_llvm_builders()
yield from create_halide_builders()
def create_schedulers():
for llvm_branch in LLVM_BRANCHES:
yield from create_llvm_scheduler(llvm_branch)
for halide_branch in HALIDE_BRANCHES:
yield from create_halide_scheduler(halide_branch)
c['builders'] = list(create_builders())
c['schedulers'] = list(create_schedulers())
# Set the builder priorities
def prioritize_builders(buildmaster, builders):
def importance(builder):
builder_type = builder.config.builder_type
assert builder_type
# LLVM nightlies run only once a day (late at night) and should always
# get priority over everything else.
if builder_type.purpose == Purpose.llvm_nightly:
return 0
# Branch testers all need to come back before we can merge a PR,
# so they all have equal next-highest priority.
if builder_type.purpose == Purpose.halide_testbranch:
return 1
# non-branch testers are mostly used for bisecting failures that
# didn't show up in the branch testers and doing binary
# releases. We care most about the most recently-released llvm so
# that we have a full set of builds for releases, then llvm main
# for bisection, then older llvm versions.
if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_15]:
return 2
if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_16]:
return 2
if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_MAIN]:
return 3
return 4
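    # Python's sorted() is stable, so builders of equal importance keep their
    # original creation order.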
return list(sorted(builders, key=importance))
c['prioritizeBuilders'] = prioritize_builders
# GitHub pull request filter
class SafeGitHubEventHandler(GitHubEventHandler):
def handle_push(self, payload, event):
ref = payload['ref']
if re.match(r"^refs/(heads|tags)/(master|main|release/\d+\.x)$", ref):
return super().handle_push(payload, event)
else:
print(f'SafeGitHubEventHandler: ignoring push event for ref: {ref}')
return self.skip()
def handle_pull_request(self, payload, event):
pr = payload['pull_request']
try:
# Skip anything with the 'skip_buildbots' label
if any(label['name'] == 'skip_buildbots' for label in pr['labels']):
# print("PR %s was skipped due to skip_buildbots" % str(pr['html_url']))
return self.skip()
# Test anything (even external) that has 'halidebuildbots' as a reviewer.
if any(r['login'] == 'halidebuildbots' for r in pr['requested_reviewers']):
# print("PR %s was handled due halidebuildbots" % str(pr['html_url']))
if payload['action'] == 'review_requested':
# Pretend it's a synchronize event instead since private buildbot code
# rejects review_requested for no apparent reason.
payload['action'] = 'synchronize'
return super().handle_pull_request(payload, event)
# Skip external pull requests that originate from untrusted forks
trusted_repos = (
'halide/Halide', # the primary repository is obviously trusted
'CodeLinaro/Halide', # the Qualcomm open-source staging fork is trustworthy
)
if pr['head']['repo']['full_name'] not in trusted_repos:
# print("PR %s was skipped due to being external:" % str(pr['head']['repo']['full_name']))
return self.skip()
# print("PR %s is being handled normally" % str(pr['html_url']))
return super().handle_pull_request(payload, event)
except KeyError as e:
print(f'SafeGitHubEventHandler: malformed payload: {payload}')
print(f'SafeGitHubEventHandler: missing key "{e}"')
return self.skip()
@staticmethod
def skip():
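        # Buildbot change hooks return a (changes, scm) tuple; an empty change
        # list means this event triggers no builds.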
return [], 'git'
# WEB SERVER
# 'protocols' contains information about the protocols the master will use to
# communicate with workers. You must define at least the 'port' option so that
# workers can connect to the master with this protocol.
# 'port' must match the value configured into the workers (with their --master option)
c['protocols'] = {'pb': {'port': 9990}}
authz = Authz(
allowRules=[ems.ForceBuildEndpointMatcher(role="admins"),
ems.StopBuildEndpointMatcher(role="admins"),
ems.RebuildBuildEndpointMatcher(role="admins"),
ems.EnableSchedulerEndpointMatcher(role="admins")],
roleMatchers=[RolesFromUsername(roles=["admins"], usernames=["halidenightly"])])
c['www'] = dict(
auth=UserPasswordAuth({'halidenightly': WWW_PASSWORD}),
authz=authz,
port=8012,
change_hook_dialects={
'github': {
'secret': WEBHOOK_SECRET,
'codebase': 'halide',
'skips': [],
'class': SafeGitHubEventHandler,
# 'github_property_whitelist': ['github.base.ref'],
},
},
)
# PROJECT IDENTITY
# the 'title' string will appear at the top of this buildbot
# installation's html.WebStatus home page (linked to the
# 'titleURL') and is embedded in the title of the waterfall HTML page.
c['title'] = 'Halide'
c['titleURL'] = 'http://halide-lang.org'
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.WebStatus page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = 'https://buildbot.halide-lang.org/master/'
# DB URL
c['db'] = {
# This specifies what database buildbot uses to store its state. You can leave
# this at its default for all but the largest installations.
'db_url': 'sqlite:///state.sqlite',
}
# GitHub Integration
# Only testbranch builders need to be considered here
builders = [str(b.name) for b in c['builders'] if b.builder_type.purpose != Purpose.llvm_nightly]
generator = BuildStartEndStatusGenerator(builders=builders,
start_formatter=MessageFormatterRenderable('Build started.'),
end_formatter=MessageFormatterRenderable('Build done.'))
gs = GitHubStatusPush(token=GITHUB_TOKEN,
context=Interpolate("buildbot/%(prop:buildername)s"),
generators=[generator],
verbose=True)
c['services'] = [gs]
# Disable sending usage data
c['buildbotNetUsageData'] = None
|
flexible
|
{
"blob_id": "4abcca52095a169b71d2527ce52b8367534c42a4",
"index": 5989,
"step-1": "<mask token>\n\n\nclass Purpose(Enum):\n halide_nightly = 1\n halide_testbranch = 2\n llvm_nightly = 3\n\n\nclass BuildSystem(Enum):\n make = 0\n cmake = 1\n\n\nclass BuilderType:\n \"\"\"A class to encapsulate the settings for a specific Builder.\n (Do not confuse with CMake's 'BUILD_TYPE', which is something else.)\n\n It includes:\n - Halide 'target' in the form of arch-bits-os\n - LLVM branch to be used\n - CMake vs Make\n - halide-nightly vs halide-testbranch vs llvm-nightly\n - sanitizers vs none\n\n It doesn't currently include any 'features' because we don't currently\n bake any in at build time.\n\n It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),\n mainly because we currently never test with multiple compilers for a given\n setup. (If we ever need to do so, compiler should be added to this.)\n \"\"\"\n\n def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,\n sanitizer=None, buildsystem=BuildSystem.cmake):\n assert arch in ['arm', 'x86']\n assert bits in [32, 64]\n assert os in ['linux', 'windows', 'osx']\n assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'\n self.arch = arch\n self.bits = bits\n self.os = os\n self.halide_branch = halide_branch\n self.llvm_branch = llvm_branch\n self.buildsystem = buildsystem\n self.purpose = purpose\n self.sanitizer = sanitizer\n if self.halide_branch:\n assert self.purpose != Purpose.llvm_nightly\n assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'\n assert self.purpose == Purpose.halide_testbranch or self.llvm_branch in LLVM_FOR_HALIDE[\n self.halide_branch]\n else:\n assert self.purpose == Purpose.llvm_nightly\n if self.sanitizer:\n assert self.sanitizer in _SANITIZERS\n\n def handles_python(self):\n if self.bits == 32:\n return False\n if self.arch == 'arm' and self.os == 'linux':\n return False\n if self.sanitizer_preset() is not None:\n return False\n return True\n\n def handles_sanitizers(self):\n if self.buildsystem != BuildSystem.cmake:\n return False\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def sanitizer_preset(self):\n if self.handles_sanitizers():\n if self.sanitizer == 'asan':\n return 'linux-x64-asan'\n if self.sanitizer == 'fuzzer':\n return 'linux-x64-fuzzer'\n return None\n\n def handles_riscv(self):\n return self.llvm_branch not in [LLVM_RELEASE_15]\n\n def handles_hexagon(self):\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def handles_wasm(self):\n is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux')\n return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os ==\n 'osx')\n\n def handles_wasm_wabt(self):\n return self.handles_wasm()\n\n def handles_wasm_v8(self):\n return self.handles_wasm() and self.os == 'linux'\n\n def has_nvidia(self):\n return self.arch == 'x86' and self.bits == 64 and self.os in ['windows'\n , 'linux']\n\n def handles_vulkan(self):\n return False\n\n def handles_webgpu(self):\n return self.os == 'osx' and self.halide_branch not in [\n HALIDE_RELEASE_15]\n\n def has_tflite(self):\n if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':\n return True\n if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':\n return True\n return False\n\n def has_ccache(self):\n return self.os in ['osx', 'linux']\n\n def halide_target(self):\n return '%s-%d-%s' % (self.arch, self.bits, self.os)\n\n def 
llvm_builder_label(self):\n return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.\n major, self.halide_target())\n\n def halide_builder_label(self):\n a = ['halide']\n if self.sanitizer:\n a.append(self.sanitizer)\n if self.purpose == Purpose.halide_testbranch:\n a.append('testbranch')\n elif self.purpose == Purpose.halide_nightly:\n a.append('nightly')\n a.append(self.halide_branch)\n if self.halide_branch == HALIDE_MAIN:\n a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')\n a.append(self.halide_target())\n a.append(self.buildsystem.name)\n return '-'.join(a)\n\n def builder_label(self):\n if self.purpose == Purpose.llvm_nightly:\n return self.llvm_builder_label()\n else:\n return self.halide_builder_label()\n\n def builder_tags(self):\n return self.builder_label().split('-')\n\n def get_worker_names(self):\n return [n for n, cfg in _WORKERS if self.arch == cfg.arch and self.\n bits in cfg.bits and self.os == cfg.os]\n\n def __str__(self):\n return self.halide_target()\n\n\ndef get_builddir_subpath(subpath):\n return Transform(lambda x: x.replace('\\\\', '/'), Interpolate(\n f'%(prop:builddir)s/{subpath}'))\n\n\n<mask token>\n\n\ndef get_llvm_source_path(*subpaths):\n return get_builddir_subpath(os.path.join('llvm-project', *subpaths))\n\n\n<mask token>\n\n\ndef get_halide_source_path(*subpaths):\n return get_builddir_subpath(os.path.join('halide-source', *subpaths))\n\n\n<mask token>\n\n\ndef get_halide_install_path(builder_type, *subpaths):\n s = 'halide-install'\n if builder_type.sanitizer:\n s += '-' + builder_type.sanitizer\n return get_builddir_subpath(os.path.join(s, *subpaths))\n\n\n<mask token>\n\n\ndef merge_renderable(_base, _extn):\n\n @renderer\n @defer.inlineCallbacks\n def render(props):\n base = yield props.render(_base)\n extn = yield props.render(_extn)\n base.update(extn)\n return base\n return render\n\n\n<mask token>\n\n\ndef get_cmake_generator(builder_type):\n return 'Ninja'\n\n\n<mask token>\n\n\ndef get_halide_cmake_definitions(builder_type, halide_target='host',\n wasm_jit='wabt'):\n cmake_definitions = {'Clang_DIR': get_llvm_install_path(builder_type,\n 'lib/cmake/clang'), 'CMAKE_INSTALL_PREFIX': get_halide_install_path\n (builder_type), 'Halide_TARGET': halide_target, 'LLD_DIR':\n get_llvm_install_path(builder_type, 'lib/cmake/lld'), 'LLVM_DIR':\n get_llvm_install_path(builder_type, 'lib/cmake/llvm'), 'LLVM_ROOT':\n get_llvm_install_path(builder_type), 'WITH_PYTHON_BINDINGS': 'ON' if\n builder_type.handles_python() else 'OFF', 'WITH_TEST_FUZZ': 'ON' if\n builder_type.sanitizer == 'fuzzer' else 'OFF'}\n if builder_type.sanitizer and builder_type.handles_sanitizers():\n pass\n else:\n cmake_definitions['CMAKE_BUILD_TYPE'] = 'Release'\n if builder_type.has_ccache() and not builder_type.sanitizer_preset():\n cmake_definitions['Halide_CCACHE_BUILD'] = 'ON'\n if (builder_type.arch == 'arm' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path(\n 'cmake', 'toolchain.linux-arm32.cmake')\n if builder_type.os == 'windows':\n cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(\n '%(prop:VCPKG_ROOT)s/scripts/buildsystems/vcpkg.cmake')\n cmake_definitions['pybind11_DIR'] = Interpolate(\n '%(prop:VIRTUAL_ENV)s/share/cmake/pybind11')\n if 'wasm' in halide_target:\n cmake_definitions['WITH_PYTHON_BINDINGS'] = 'OFF'\n if builder_type.handles_wasm() and halide_target.startswith('wasm-'):\n cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(\n 
'%(prop:EMSDK)s/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake'\n )\n cmake_definitions['NODE_JS_EXECUTABLE'] = Property(\n 'HALIDE_NODE_JS_PATH')\n if wasm_jit == 'v8':\n cmake_definitions['WITH_WABT'] = 'OFF'\n cmake_definitions['WITH_V8'] = 'ON'\n cmake_definitions['V8_INCLUDE_PATH'\n ] = '/home/halidenightly/v8/v8/include'\n cmake_definitions['V8_LIB_PATH'] = (\n '/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a'\n )\n elif wasm_jit == 'wabt':\n cmake_definitions['WITH_WABT'] = 'ON'\n cmake_definitions['WITH_V8'] = 'OFF'\n cmake_definitions['V8_INCLUDE_PATH'] = ''\n cmake_definitions['V8_LIB_PATH'] = ''\n else:\n assert False, 'Unknown wasm jit ' + str(wasm_jit)\n if builder_type.handles_webgpu() and 'webgpu' in halide_target:\n cmake_definitions['WEBGPU_NODE_BINDINGS'] = Property(\n 'HL_WEBGPU_NODE_BINDINGS')\n cmake_definitions['WEBGPU_NATIVE_LIB'] = Property(\n 'HL_WEBGPU_NATIVE_LIB')\n if builder_type.handles_hexagon() and 'hvx' in halide_target:\n cmake_definitions['Halide_BUILD_HEXAGON_REMOTE_RUNTIME'] = 'ON'\n return cmake_definitions\n\n\n<mask token>\n\n\ndef get_llvm_cmake_definitions(builder_type):\n definitions = {'CMAKE_BUILD_TYPE': 'Release', 'CMAKE_INSTALL_PREFIX':\n get_llvm_install_path(builder_type), 'LLVM_BUILD_32_BITS': 'ON' if \n builder_type.bits == 32 else 'OFF', 'LLVM_ENABLE_ASSERTIONS': 'ON',\n 'LLVM_ENABLE_BINDINGS': 'OFF', 'LLVM_ENABLE_CURL': 'OFF',\n 'LLVM_ENABLE_DIA_SDK': 'OFF', 'LLVM_ENABLE_HTTPLIB': 'OFF',\n 'LLVM_ENABLE_IDE': 'OFF', 'LLVM_ENABLE_LIBXML2': 'OFF',\n 'LLVM_ENABLE_OCAMLDOC': 'OFF', 'LLVM_ENABLE_RTTI': 'ON',\n 'LLVM_ENABLE_TERMINFO': 'OFF', 'LLVM_ENABLE_WARNINGS': 'OFF',\n 'LLVM_ENABLE_ZLIB': 'ON', 'LLVM_ENABLE_ZSTD': 'OFF',\n 'LLVM_INCLUDE_BENCHMARKS': 'OFF', 'LLVM_INCLUDE_EXAMPLES': 'OFF',\n 'LLVM_INCLUDE_TESTS': 'OFF', 'LLVM_TARGETS_TO_BUILD':\n 'X86;ARM;NVPTX;AArch64;Hexagon;PowerPC;WebAssembly'}\n if builder_type.bits == 32:\n definitions['CMAKE_FIND_ROOT_PATH_MODE_INCLUDE'] = 'ONLY'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_PACKAGE'] = 'ONLY'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_PROGRAM'] = 'NEVER'\n if builder_type.handles_riscv():\n definitions['LLVM_TARGETS_TO_BUILD'] += ';RISCV'\n if builder_type.handles_sanitizers():\n definitions['LLVM_ENABLE_RUNTIMES'\n ] = 'compiler-rt;libcxx;libcxxabi;libunwind'\n definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld;clang-tools-extra'\n else:\n definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld'\n if builder_type.os != 'windows':\n definitions['CMAKE_CXX_FLAGS'] = '-Wno-psabi'\n if (builder_type.arch == 'arm' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n definitions['CMAKE_TOOLCHAIN_FILE'] = get_llvm_toolchains_path(\n 'toolchain.linux-arm32.cmake')\n definitions['LLVM_TARGET_ARCH'] = 'ARM'\n definitions['LLVM_DEFAULT_TARGET_TRIPLE'] = 'arm-linux-gnueabihf'\n if (builder_type.arch == 'x86' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n definitions['CMAKE_FIND_ROOT_PATH'] = '/usr/lib/i386-linux-gnu'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'\n if builder_type.os == 'osx':\n definitions['LLVM_ENABLE_SUPPORT_XCODE_SIGNPOSTS'] = 'FORCE_OFF'\n if builder_type.has_ccache():\n definitions['LLVM_CCACHE_BUILD'] = 'ON'\n return definitions\n\n\n<mask token>\n\n\ndef add_halide_cmake_build_steps(factory, builder_type):\n source_dir = get_halide_source_path()\n build_dir = get_halide_build_path()\n install_dir = get_halide_install_path(builder_type)\n 
factory.addStep(RemoveDirectory(name='Remove Halide Build Dir', locks=[\n performance_lock.access('counting')], dir=build_dir, haltOnFailure=\n False))\n factory.addStep(MakeDirectory(name='Make Halide Build Dir', locks=[\n performance_lock.access('counting')], dir=build_dir, haltOnFailure=\n False))\n factory.addStep(RemoveDirectory(name='Remove Halide Install Dir', locks\n =[performance_lock.access('counting')], dir=install_dir,\n haltOnFailure=False))\n factory.addStep(MakeDirectory(name='Make Halide Install Dir', locks=[\n performance_lock.access('counting')], dir=install_dir,\n haltOnFailure=False))\n factory.addStep(CMake(name='Configure Halide', description=\n 'Configure Halide', locks=[performance_lock.access('counting')],\n haltOnFailure=True, workdir=build_dir, env=Property('env'), path=\n source_dir, generator=get_cmake_generator(builder_type),\n definitions=get_halide_cmake_definitions(builder_type), options=\n get_halide_cmake_options(builder_type, build_dir)))\n factory.addStep(ShellCommand(name='Build Halide', description=\n 'Build Halide', locks=[performance_lock.access('counting')],\n haltOnFailure=True, workdir=build_dir, env=Property('env'), command\n =get_cmake_build_command(builder_type, build_dir, targets=['all',\n 'install'])))\n\n\ndef add_halide_cmake_package_steps(factory, builder_type):\n source_dir = get_halide_source_path()\n target = builder_type.halide_target()\n ext = 'zip' if builder_type.os == 'windows' else 'tar.gz'\n factory.addStep(SetPropertiesFromCMakeCache(name=\n 'Get Halide package version', workdir=get_halide_build_path(),\n props=['CMAKE_PROJECT_VERSION']))\n extra_env = dict(Clang_DIR=get_llvm_install_path(builder_type,\n 'lib/cmake/clang'), LLD_DIR=get_llvm_install_path(builder_type,\n 'lib/cmake/lld'), LLVM_DIR=get_llvm_install_path(builder_type,\n 'lib/cmake/llvm'), Halide_VERSION=Property('CMAKE_PROJECT_VERSION'))\n if builder_type.os == 'windows':\n build_dir = get_halide_build_path('packaging_dir')\n if builder_type.arch == 'arm':\n arch = 'ARM' if builder_type.bits == 32 else 'ARM64'\n else:\n arch = 'Win32' if builder_type.bits == 32 else 'x64'\n cmd = [get_halide_source_path('packaging/zip/package.bat'),\n source_dir, build_dir, arch]\n else:\n build_dir = get_halide_build_path()\n cmd = [get_halide_source_path('packaging/tgz/package.sh'),\n source_dir, build_dir]\n if (builder_type.arch == 'arm' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n extra_env['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake',\n 'toolchain.linux-arm32.cmake')\n factory.addStep(ShellCommand(name='Package Halide', description=\n 'Package Halide', workdir=build_dir, env=extend_property('env', **\n extra_env), locks=[performance_lock.access('counting')],\n haltOnFailure=True, command=cmd))\n factory.addStep(FileUpload(name='Upload Halide package', workersrc=\n Interpolate(f'Halide-%(prop:CMAKE_PROJECT_VERSION)s-{target}.{ext}'\n ), locks=[performance_lock.access('counting')], haltOnFailure=True,\n workdir=build_dir, mode=420, masterdest=get_distrib_name(Property(\n 'CMAKE_PROJECT_VERSION'), target, ext)))\n\n def pkg_version_and_target(path: Path):\n match = re.match('^(.*)-[a-f0-9]+\\\\.(tar\\\\.gz|tgz|zip)', path.name)\n return match.group(1) if match else None\n factory.addStep(CleanOldFiles(name='Clean old releases', workdir=\n ARTIFACTS_DIR, locks=[performance_lock.access('counting')], groupfn\n =pkg_version_and_target))\n\n\n<mask token>\n\n\ndef get_test_labels(builder_type):\n targets = defaultdict(list)\n preset = 
builder_type.sanitizer_preset()\n if preset and 'fuzz' in preset:\n targets['host'].extend(['fuzz'])\n return targets\n targets['host'].extend(['internal', 'correctness', 'generator',\n 'autoschedulers_cpu', 'error', 'warning', 'apps', 'performance',\n 'tutorial'])\n if preset:\n return targets\n if (builder_type.arch == 'arm' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n targets['host'].remove('internal')\n targets['host'].remove('generator')\n if builder_type.handles_python():\n targets['host'].extend(['python'])\n if builder_type.arch == 'x86':\n t = 'x86-%d-%s' % (builder_type.bits, builder_type.os)\n targets[t].extend(['correctness'])\n if builder_type.bits == 64:\n targets['%s-sse41' % t].extend(['correctness'])\n for t, is_simulator in get_gpu_dsp_targets(builder_type):\n if t == 'host-webgpu':\n targets[t].extend(['correctness', 'generator'])\n else:\n targets[t].extend(['correctness', 'generator', 'apps'])\n if 'cuda' in t:\n targets[t].extend(['autoschedulers_cuda'])\n if 'hvx' not in t:\n targets[t].extend(['autoschedulers_gpu'])\n if not is_simulator:\n targets[t].extend(['performance'])\n if builder_type.has_nvidia():\n targets['host-cuda-opencl'].extend(['correctness_multi_gpu'])\n if builder_type.handles_vulkan():\n targets[\n 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13'\n ].extend(['internal', 'correctness', 'generator', 'error',\n 'warning'])\n if builder_type.handles_wasm():\n if builder_type.handles_wasm_wabt():\n targets[\n 'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/wabt'\n ].extend(['internal', 'correctness', 'generator', 'error',\n 'warning'])\n if builder_type.handles_wasm_v8():\n targets[\n 'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/v8'\n ].extend(['internal', 'correctness', 'generator', 'error',\n 'warning'])\n targets[\n 'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int-wasm_threads'\n ].extend(['generator', 'apps'])\n if builder_type.handles_webgpu():\n targets['wasm-32-wasmrt-webgpu'].extend(['generator'])\n return targets\n\n\n<mask token>\n\n\ndef short_target(halide_target):\n s = halide_target.split('-')\n if len(s) == 1:\n return s[0]\n elif len(s) == 2:\n return '%s-%s' % (s[0], s[1])\n elif len(s) == 3:\n return '%s-%s-%s' % (s[0], s[1], s[2])\n elif len(s) > 3:\n return '%s-%s-%s…' % (s[0], s[1], s[2])\n else:\n return '<unknown>'\n\n\n<mask token>\n\n\ndef create_halide_cmake_factory(builder_type):\n factory = BuildFactory()\n add_env_setup_step(factory, builder_type)\n add_get_halide_source_steps(factory, builder_type)\n add_halide_cmake_build_steps(factory, builder_type)\n add_halide_cmake_test_steps(factory, builder_type)\n if builder_type.purpose == Purpose.halide_nightly:\n add_halide_cmake_package_steps(factory, builder_type)\n return factory\n\n\n<mask token>\n\n\ndef create_halide_builders():\n for arch, bits, os in get_interesting_halide_targets():\n for halide_branch in HALIDE_NIGHTLIES:\n for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:\n yield from create_halide_builder(arch, bits, os,\n halide_branch, llvm_branch, Purpose.halide_nightly)\n for halide_branch in _HALIDE_RELEASES:\n for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:\n yield from create_halide_builder(arch, bits, os,\n halide_branch, llvm_branch, Purpose.halide_testbranch)\n yield from create_halide_builder(arch, bits, os, HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch)\n yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,\n LLVM_MAIN, 
Purpose.halide_testbranch, BuildSystem.make)\n yield from create_halide_builder('x86', 32, 'linux', HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)\n yield from create_halide_builder('x86', 64, 'osx', HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)\n yield from create_halide_builder('arm', 64, 'osx', HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)\n for llvm_branch in LLVM_BRANCHES:\n if abs(LLVM_BRANCHES[llvm_branch].version.major - LLVM_BRANCHES[\n LLVM_MAIN].version.major) in [1, 2]:\n yield from create_halide_builder('x86', 64, 'linux',\n HALIDE_MAIN, llvm_branch, Purpose.halide_testbranch)\n\n\n<mask token>\n\n\nclass SafeGitHubEventHandler(GitHubEventHandler):\n\n def handle_push(self, payload, event):\n ref = payload['ref']\n if re.match('^refs/(heads|tags)/(master|main|release/\\\\d+\\\\.x)$', ref):\n return super().handle_push(payload, event)\n else:\n print(f'SafeGitHubEventHandler: ignoring push event for ref: {ref}'\n )\n return self.skip()\n\n def handle_pull_request(self, payload, event):\n pr = payload['pull_request']\n try:\n if any(label['name'] == 'skip_buildbots' for label in pr['labels']\n ):\n return self.skip()\n if any(r['login'] == 'halidebuildbots' for r in pr[\n 'requested_reviewers']):\n if payload['action'] == 'review_requested':\n payload['action'] = 'synchronize'\n return super().handle_pull_request(payload, event)\n trusted_repos = 'halide/Halide', 'CodeLinaro/Halide'\n if pr['head']['repo']['full_name'] not in trusted_repos:\n return self.skip()\n return super().handle_pull_request(payload, event)\n except KeyError as e:\n print(f'SafeGitHubEventHandler: malformed payload: {payload}')\n print(f'SafeGitHubEventHandler: missing key \"{e}\"')\n return self.skip()\n\n @staticmethod\n def skip():\n return [], 'git'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Purpose(Enum):\n halide_nightly = 1\n halide_testbranch = 2\n llvm_nightly = 3\n\n\nclass BuildSystem(Enum):\n make = 0\n cmake = 1\n\n\nclass BuilderType:\n \"\"\"A class to encapsulate the settings for a specific Builder.\n (Do not confuse with CMake's 'BUILD_TYPE', which is something else.)\n\n It includes:\n - Halide 'target' in the form of arch-bits-os\n - LLVM branch to be used\n - CMake vs Make\n - halide-nightly vs halide-testbranch vs llvm-nightly\n - sanitizers vs none\n\n It doesn't currently include any 'features' because we don't currently\n bake any in at build time.\n\n It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),\n mainly because we currently never test with multiple compilers for a given\n setup. (If we ever need to do so, compiler should be added to this.)\n \"\"\"\n\n def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,\n sanitizer=None, buildsystem=BuildSystem.cmake):\n assert arch in ['arm', 'x86']\n assert bits in [32, 64]\n assert os in ['linux', 'windows', 'osx']\n assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'\n self.arch = arch\n self.bits = bits\n self.os = os\n self.halide_branch = halide_branch\n self.llvm_branch = llvm_branch\n self.buildsystem = buildsystem\n self.purpose = purpose\n self.sanitizer = sanitizer\n if self.halide_branch:\n assert self.purpose != Purpose.llvm_nightly\n assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'\n assert self.purpose == Purpose.halide_testbranch or self.llvm_branch in LLVM_FOR_HALIDE[\n self.halide_branch]\n else:\n assert self.purpose == Purpose.llvm_nightly\n if self.sanitizer:\n assert self.sanitizer in _SANITIZERS\n\n def handles_python(self):\n if self.bits == 32:\n return False\n if self.arch == 'arm' and self.os == 'linux':\n return False\n if self.sanitizer_preset() is not None:\n return False\n return True\n\n def handles_sanitizers(self):\n if self.buildsystem != BuildSystem.cmake:\n return False\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def sanitizer_preset(self):\n if self.handles_sanitizers():\n if self.sanitizer == 'asan':\n return 'linux-x64-asan'\n if self.sanitizer == 'fuzzer':\n return 'linux-x64-fuzzer'\n return None\n\n def handles_riscv(self):\n return self.llvm_branch not in [LLVM_RELEASE_15]\n\n def handles_hexagon(self):\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def handles_wasm(self):\n is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux')\n return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os ==\n 'osx')\n\n def handles_wasm_wabt(self):\n return self.handles_wasm()\n\n def handles_wasm_v8(self):\n return self.handles_wasm() and self.os == 'linux'\n\n def has_nvidia(self):\n return self.arch == 'x86' and self.bits == 64 and self.os in ['windows'\n , 'linux']\n\n def handles_vulkan(self):\n return False\n\n def handles_webgpu(self):\n return self.os == 'osx' and self.halide_branch not in [\n HALIDE_RELEASE_15]\n\n def has_tflite(self):\n if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':\n return True\n if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':\n return True\n return False\n\n def has_ccache(self):\n return self.os in ['osx', 'linux']\n\n def halide_target(self):\n return '%s-%d-%s' % (self.arch, self.bits, self.os)\n\n def 
llvm_builder_label(self):\n return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.\n major, self.halide_target())\n\n def halide_builder_label(self):\n a = ['halide']\n if self.sanitizer:\n a.append(self.sanitizer)\n if self.purpose == Purpose.halide_testbranch:\n a.append('testbranch')\n elif self.purpose == Purpose.halide_nightly:\n a.append('nightly')\n a.append(self.halide_branch)\n if self.halide_branch == HALIDE_MAIN:\n a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')\n a.append(self.halide_target())\n a.append(self.buildsystem.name)\n return '-'.join(a)\n\n def builder_label(self):\n if self.purpose == Purpose.llvm_nightly:\n return self.llvm_builder_label()\n else:\n return self.halide_builder_label()\n\n def builder_tags(self):\n return self.builder_label().split('-')\n\n def get_worker_names(self):\n return [n for n, cfg in _WORKERS if self.arch == cfg.arch and self.\n bits in cfg.bits and self.os == cfg.os]\n\n def __str__(self):\n return self.halide_target()\n\n\ndef get_builddir_subpath(subpath):\n return Transform(lambda x: x.replace('\\\\', '/'), Interpolate(\n f'%(prop:builddir)s/{subpath}'))\n\n\ndef get_llvm_toolchains_path(*subpaths):\n return get_builddir_subpath(os.path.join('llvm-toolchains', *subpaths))\n\n\ndef get_llvm_source_path(*subpaths):\n return get_builddir_subpath(os.path.join('llvm-project', *subpaths))\n\n\ndef get_llvm_build_path(*subpaths):\n return get_builddir_subpath(os.path.join('llvm-build', *subpaths))\n\n\ndef get_llvm_install_path(builder_type, *subpaths):\n llvm_workdir = builder_type.llvm_builder_label()\n return get_builddir_subpath(os.path.join('..', llvm_workdir,\n 'llvm-install', *subpaths))\n\n\ndef get_halide_source_path(*subpaths):\n return get_builddir_subpath(os.path.join('halide-source', *subpaths))\n\n\n<mask token>\n\n\ndef get_halide_install_path(builder_type, *subpaths):\n s = 'halide-install'\n if builder_type.sanitizer:\n s += '-' + builder_type.sanitizer\n return get_builddir_subpath(os.path.join(s, *subpaths))\n\n\n<mask token>\n\n\ndef get_msvc_config_steps(factory, builder_type):\n arch_for_bits = {(32): 'x64_x86', (64): 'x64'}\n vcvarsall = 'vcvarsall.bat %s && set' % arch_for_bits[builder_type.bits]\n vcvarsdir = (\n 'C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Auxiliary/Build'\n )\n\n def save_interesting_env_vars(rc, stdout, stderr):\n d = {}\n for line in stdout.split('\\n'):\n match = re.match('^([a-zA-Z0-9_-]+)=(.*)$', line.strip())\n if match:\n key = match.group(1).upper()\n value = match.group(2)\n if key in VCVARSALL_ENV_VARS:\n d[key] = value\n return {'env': d}\n factory.addStep(SetPropertyFromCommand(name='Run VcVarsAll',\n description='Run VcVarsAll', workdir=vcvarsdir, locks=[\n performance_lock.access('counting')], haltOnFailure=True, command=\n vcvarsall, extract_fn=save_interesting_env_vars))\n\n\ndef merge_renderable(_base, _extn):\n\n @renderer\n @defer.inlineCallbacks\n def render(props):\n base = yield props.render(_base)\n extn = yield props.render(_extn)\n base.update(extn)\n return base\n return render\n\n\ndef get_distrib_name(_version, target, ext):\n\n @renderer\n @defer.inlineCallbacks\n def render(props):\n rev = props.getProperty('got_revision')['halide']\n version = yield props.render(_version)\n return os.path.join(ARTIFACTS_DIR,\n f'Halide-{version}-{target}-{rev}.{ext}')\n return render\n\n\ndef get_cmake_generator(builder_type):\n return 'Ninja'\n\n\n<mask token>\n\n\ndef get_ctest_options(builder_type, build_dir):\n assert builder_type.purpose != 


class BuilderType:
    """A class to encapsulate the settings for a specific Builder.
    (Do not confuse with CMake's 'BUILD_TYPE', which is something else.)

    It includes:
    - Halide 'target' in the form of arch-bits-os
    - LLVM branch to be used
    - CMake vs Make
    - halide-nightly vs halide-testbranch vs llvm-nightly
    - sanitizers vs none

    It doesn't currently include any 'features' because we don't currently
    bake any in at build time.

    It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),
    mainly because we currently never test with multiple compilers for a given
    setup. (If we ever need to do so, compiler should be added to this.)
    """

    def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,
                 sanitizer=None, buildsystem=BuildSystem.cmake):
        assert arch in ['arm', 'x86']
        assert bits in [32, 64]
        assert os in ['linux', 'windows', 'osx']
        assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'
        self.arch = arch
        self.bits = bits
        self.os = os
        self.halide_branch = halide_branch
        self.llvm_branch = llvm_branch
        self.buildsystem = buildsystem
        self.purpose = purpose
        self.sanitizer = sanitizer
        if self.halide_branch:
            assert self.purpose != Purpose.llvm_nightly
            assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'
            assert (self.purpose == Purpose.halide_testbranch or
                    self.llvm_branch in LLVM_FOR_HALIDE[self.halide_branch])
        else:
            assert self.purpose == Purpose.llvm_nightly
        if self.sanitizer:
            assert self.sanitizer in _SANITIZERS

    def handles_python(self):
        if self.bits == 32:
            return False
        if self.arch == 'arm' and self.os == 'linux':
            return False
        if self.sanitizer_preset() is not None:
            return False
        return True

    def handles_sanitizers(self):
        if self.buildsystem != BuildSystem.cmake:
            return False
        return (self.arch == 'x86' and self.bits == 64 and
                self.os == 'linux' and self.llvm_branch == LLVM_MAIN)

    def sanitizer_preset(self):
        if self.handles_sanitizers():
            if self.sanitizer == 'asan':
                return 'linux-x64-asan'
            if self.sanitizer == 'fuzzer':
                return 'linux-x64-fuzzer'
        return None

    def handles_riscv(self):
        return self.llvm_branch not in [LLVM_RELEASE_15]

    def handles_hexagon(self):
        return (self.arch == 'x86' and self.bits == 64 and
                self.os == 'linux' and self.llvm_branch == LLVM_MAIN)

    def handles_wasm(self):
        is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os == 'linux')
        return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os == 'osx')

    def handles_wasm_wabt(self):
        return self.handles_wasm()

    def handles_wasm_v8(self):
        return self.handles_wasm() and self.os == 'linux'

    def has_nvidia(self):
        return self.arch == 'x86' and self.bits == 64 and self.os in ['windows', 'linux']

    def handles_vulkan(self):
        return False

    def handles_webgpu(self):
        return self.os == 'osx' and self.halide_branch not in [HALIDE_RELEASE_15]

    def has_tflite(self):
        if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':
            return True
        if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':
            return True
        return False

    def has_ccache(self):
        return self.os in ['osx', 'linux']

    def halide_target(self):
        return '%s-%d-%s' % (self.arch, self.bits, self.os)

    def llvm_builder_label(self):
        return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.major,
                               self.halide_target())

    def halide_builder_label(self):
        a = ['halide']
        if self.sanitizer:
            a.append(self.sanitizer)
        if self.purpose == Purpose.halide_testbranch:
            a.append('testbranch')
        elif self.purpose == Purpose.halide_nightly:
            a.append('nightly')
        a.append(self.halide_branch)
        if self.halide_branch == HALIDE_MAIN:
            a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')
        a.append(self.halide_target())
        a.append(self.buildsystem.name)
        return '-'.join(a)

    def builder_label(self):
        if self.purpose == Purpose.llvm_nightly:
            return self.llvm_builder_label()
        else:
            return self.halide_builder_label()

    def builder_tags(self):
        return self.builder_label().split('-')

    def get_worker_names(self):
        return [n for n, cfg in _WORKERS
                if self.arch == cfg.arch and self.bits in cfg.bits and self.os == cfg.os]

    def __str__(self):
        return self.halide_target()
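
# A hedged illustration of the label scheme above. The concrete branch name
# ('main') and LLVM major (17) are assumptions for the example; the real
# values come from HALIDE_BRANCHES/LLVM_BRANCHES defined earlier in this file:
#
#   bt = BuilderType('x86', 64, 'linux', HALIDE_MAIN, LLVM_MAIN,
#                    Purpose.halide_nightly)
#   bt.halide_target()         -> 'x86-64-linux'
#   bt.halide_builder_label()  -> 'halide-nightly-main-llvm17-x86-64-linux-cmake'
#   bt.builder_tags()          -> ['halide', 'nightly', 'main', 'llvm17',
#                                  'x86', '64', 'linux', 'cmake']
#
# Note that builder_tags() splits on every '-', so the target itself
# contributes three separate tags.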


def get_builddir_subpath(subpath):
    return Transform(lambda x: x.replace('\\', '/'),
                     Interpolate(f'%(prop:builddir)s/{subpath}'))


def get_llvm_toolchains_path(*subpaths):
    return get_builddir_subpath(os.path.join('llvm-toolchains', *subpaths))


def get_llvm_source_path(*subpaths):
    return get_builddir_subpath(os.path.join('llvm-project', *subpaths))


def get_llvm_build_path(*subpaths):
    return get_builddir_subpath(os.path.join('llvm-build', *subpaths))


def get_llvm_install_path(builder_type, *subpaths):
    llvm_workdir = builder_type.llvm_builder_label()
    return get_builddir_subpath(os.path.join('..', llvm_workdir,
                                             'llvm-install', *subpaths))


def get_halide_source_path(*subpaths):
    return get_builddir_subpath(os.path.join('halide-source', *subpaths))


<mask token>


def get_halide_install_path(builder_type, *subpaths):
    s = 'halide-install'
    if builder_type.sanitizer:
        s += '-' + builder_type.sanitizer
    return get_builddir_subpath(os.path.join(s, *subpaths))
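
# For reference, a sketch of what these renderables produce at runtime,
# assuming a hypothetical worker whose 'builddir' property is
# 'C:\\w\\halide-nightly': get_llvm_source_path('clang') interpolates to a
# path under '.../llvm-project/clang', and the Transform then rewrites every
# backslash to '/', so the same step definitions work on both Windows and
# POSIX workers.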


def add_get_halide_source_steps(factory, builder_type):
    factory.addStep(GitHub(name='Get Halide source',
                           locks=[performance_lock.access('counting')],
                           codebase='halide',
                           workdir=get_halide_source_path(),
                           repourl='https://github.com/halide/Halide.git',
                           branch=HALIDE_BRANCHES[builder_type.halide_branch].ref,
                           mode='incremental'))


def add_get_llvm_source_steps(factory, builder_type):
    factory.addStep(Git(name=f'Get LLVM {LLVM_BRANCHES[builder_type.llvm_branch].version.major}',
                        locks=[performance_lock.access('counting')],
                        codebase='llvm',
                        workdir=get_llvm_source_path(),
                        repourl='https://github.com/llvm/llvm-project.git',
                        branch=LLVM_BRANCHES[builder_type.llvm_branch].ref,
                        mode='incremental'))
    toolchains_dir = get_llvm_toolchains_path()
    factory.addStep(MakeDirectory(name='Make CMake toolchain directory',
                                  locks=[performance_lock.access('counting')],
                                  dir=toolchains_dir, haltOnFailure=False))
    factory.addStep(FileDownload(name='Download CMake toolchains',
                                 mastersrc='toolchain.linux-arm32.cmake',
                                 workerdest='toolchain.linux-arm32.cmake',
                                 workdir=toolchains_dir,
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True, mode=420))


<mask token>


def get_msvc_config_steps(factory, builder_type):
    arch_for_bits = {32: 'x64_x86', 64: 'x64'}
    vcvarsall = 'vcvarsall.bat %s && set' % arch_for_bits[builder_type.bits]
    vcvarsdir = 'C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Auxiliary/Build'

    # Run vcvarsall, then dump the resulting environment so the interesting
    # variables can be harvested into build properties.
    def save_interesting_env_vars(rc, stdout, stderr):
        d = {}
        for line in stdout.split('\n'):
            match = re.match('^([a-zA-Z0-9_-]+)=(.*)$', line.strip())
            if match:
                key = match.group(1).upper()
                value = match.group(2)
                if key in VCVARSALL_ENV_VARS:
                    d[key] = value
        return {'env': d}

    factory.addStep(SetPropertyFromCommand(name='Run VcVarsAll',
                                           description='Run VcVarsAll',
                                           workdir=vcvarsdir,
                                           locks=[performance_lock.access('counting')],
                                           haltOnFailure=True,
                                           command=vcvarsall,
                                           extract_fn=save_interesting_env_vars))
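
# Example of the extraction above, on a hypothetical line of vcvarsall
# output: given stdout containing 'INCLUDE=C:\\VS\\include', the regex
# captures key 'INCLUDE' and its value; if 'INCLUDE' appears in
# VCVARSALL_ENV_VARS, the step records {'env': {'INCLUDE': 'C:\\VS\\include'}}
# as build properties for later steps to consume.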


def merge_renderable(_base, _extn):

    @renderer
    @defer.inlineCallbacks
    def render(props):
        base = yield props.render(_base)
        extn = yield props.render(_extn)
        base.update(extn)
        return base

    return render


def get_distrib_name(_version, target, ext):

    @renderer
    @defer.inlineCallbacks
    def render(props):
        rev = props.getProperty('got_revision')['halide']
        version = yield props.render(_version)
        return os.path.join(ARTIFACTS_DIR,
                            f'Halide-{version}-{target}-{rev}.{ext}')

    return render


def get_cmake_generator(builder_type):
    return 'Ninja'


<mask token>


def get_halide_cmake_options(builder_type, build_dir):
    options = []
    if builder_type.sanitizer and builder_type.purpose != Purpose.llvm_nightly:
        assert builder_type.handles_sanitizers()
        options.append('--preset=%s' % builder_type.sanitizer_preset())
    options += ['-B', build_dir]
    return options
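
# Hedged example: for a linux-x64 asan test-branch builder the options list
# works out to ['--preset=linux-x64-asan', '-B', <build_dir>]; for ordinary
# (non-sanitizer) builders it is just ['-B', <build_dir>].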


def get_ctest_options(builder_type, build_dir):
    assert builder_type.purpose != Purpose.llvm_nightly
    if builder_type.sanitizer:
        assert builder_type.handles_sanitizers()
        return {'build_config': builder_type.sanitizer_preset()}
    else:
        return {'build_config': 'Release'}


def get_halide_cmake_definitions(builder_type, halide_target='host', wasm_jit='wabt'):
    cmake_definitions = {
        'Clang_DIR': get_llvm_install_path(builder_type, 'lib/cmake/clang'),
        'CMAKE_INSTALL_PREFIX': get_halide_install_path(builder_type),
        'Halide_TARGET': halide_target,
        'LLD_DIR': get_llvm_install_path(builder_type, 'lib/cmake/lld'),
        'LLVM_DIR': get_llvm_install_path(builder_type, 'lib/cmake/llvm'),
        'LLVM_ROOT': get_llvm_install_path(builder_type),
        'WITH_PYTHON_BINDINGS': 'ON' if builder_type.handles_python() else 'OFF',
        'WITH_TEST_FUZZ': 'ON' if builder_type.sanitizer == 'fuzzer' else 'OFF',
    }
    # Sanitizer builds get their build type from the CMake preset; everything
    # else is a plain Release build.
    if not (builder_type.sanitizer and builder_type.handles_sanitizers()):
        cmake_definitions['CMAKE_BUILD_TYPE'] = 'Release'
    if builder_type.has_ccache() and not builder_type.sanitizer_preset():
        cmake_definitions['Halide_CCACHE_BUILD'] = 'ON'
    if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':
        cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path(
            'cmake', 'toolchain.linux-arm32.cmake')
    if builder_type.os == 'windows':
        cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
            '%(prop:VCPKG_ROOT)s/scripts/buildsystems/vcpkg.cmake')
        cmake_definitions['pybind11_DIR'] = Interpolate(
            '%(prop:VIRTUAL_ENV)s/share/cmake/pybind11')
    if 'wasm' in halide_target:
        cmake_definitions['WITH_PYTHON_BINDINGS'] = 'OFF'
    if builder_type.handles_wasm() and halide_target.startswith('wasm-'):
        cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(
            '%(prop:EMSDK)s/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake')
        cmake_definitions['NODE_JS_EXECUTABLE'] = Property('HALIDE_NODE_JS_PATH')
        if wasm_jit == 'v8':
            cmake_definitions['WITH_WABT'] = 'OFF'
            cmake_definitions['WITH_V8'] = 'ON'
            cmake_definitions['V8_INCLUDE_PATH'] = '/home/halidenightly/v8/v8/include'
            cmake_definitions['V8_LIB_PATH'] = (
                '/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a')
        elif wasm_jit == 'wabt':
            cmake_definitions['WITH_WABT'] = 'ON'
            cmake_definitions['WITH_V8'] = 'OFF'
            cmake_definitions['V8_INCLUDE_PATH'] = ''
            cmake_definitions['V8_LIB_PATH'] = ''
        else:
            assert False, 'Unknown wasm jit ' + str(wasm_jit)
    if builder_type.handles_webgpu() and 'webgpu' in halide_target:
        cmake_definitions['WEBGPU_NODE_BINDINGS'] = Property('HL_WEBGPU_NODE_BINDINGS')
        cmake_definitions['WEBGPU_NATIVE_LIB'] = Property('HL_WEBGPU_NATIVE_LIB')
    if builder_type.handles_hexagon() and 'hvx' in halide_target:
        cmake_definitions['Halide_BUILD_HEXAGON_REMOTE_RUNTIME'] = 'ON'
    return cmake_definitions
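
# Hedged example: get_halide_cmake_definitions(bt, halide_target='wasm-32-wasmrt')
# on a wasm-capable builder turns WITH_PYTHON_BINDINGS 'OFF', points
# CMAKE_TOOLCHAIN_FILE at the Emscripten toolchain via the worker's EMSDK
# property, and (with the default wasm_jit='wabt') selects WITH_WABT='ON'
# and WITH_V8='OFF'.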
"step-3": "<mask token>\n\n\nclass Purpose(Enum):\n halide_nightly = 1\n halide_testbranch = 2\n llvm_nightly = 3\n\n\nclass BuildSystem(Enum):\n make = 0\n cmake = 1\n\n\nclass BuilderType:\n \"\"\"A class to encapsulate the settings for a specific Builder.\n (Do not confuse with CMake's 'BUILD_TYPE', which is something else.)\n\n It includes:\n - Halide 'target' in the form of arch-bits-os\n - LLVM branch to be used\n - CMake vs Make\n - halide-nightly vs halide-testbranch vs llvm-nightly\n - sanitizers vs none\n\n It doesn't currently include any 'features' because we don't currently\n bake any in at build time.\n\n It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),\n mainly because we currently never test with multiple compilers for a given\n setup. (If we ever need to do so, compiler should be added to this.)\n \"\"\"\n\n def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,\n sanitizer=None, buildsystem=BuildSystem.cmake):\n assert arch in ['arm', 'x86']\n assert bits in [32, 64]\n assert os in ['linux', 'windows', 'osx']\n assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'\n self.arch = arch\n self.bits = bits\n self.os = os\n self.halide_branch = halide_branch\n self.llvm_branch = llvm_branch\n self.buildsystem = buildsystem\n self.purpose = purpose\n self.sanitizer = sanitizer\n if self.halide_branch:\n assert self.purpose != Purpose.llvm_nightly\n assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'\n assert self.purpose == Purpose.halide_testbranch or self.llvm_branch in LLVM_FOR_HALIDE[\n self.halide_branch]\n else:\n assert self.purpose == Purpose.llvm_nightly\n if self.sanitizer:\n assert self.sanitizer in _SANITIZERS\n\n def handles_python(self):\n if self.bits == 32:\n return False\n if self.arch == 'arm' and self.os == 'linux':\n return False\n if self.sanitizer_preset() is not None:\n return False\n return True\n\n def handles_sanitizers(self):\n if self.buildsystem != BuildSystem.cmake:\n return False\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def sanitizer_preset(self):\n if self.handles_sanitizers():\n if self.sanitizer == 'asan':\n return 'linux-x64-asan'\n if self.sanitizer == 'fuzzer':\n return 'linux-x64-fuzzer'\n return None\n\n def handles_riscv(self):\n return self.llvm_branch not in [LLVM_RELEASE_15]\n\n def handles_hexagon(self):\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def handles_wasm(self):\n is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux')\n return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os ==\n 'osx')\n\n def handles_wasm_wabt(self):\n return self.handles_wasm()\n\n def handles_wasm_v8(self):\n return self.handles_wasm() and self.os == 'linux'\n\n def has_nvidia(self):\n return self.arch == 'x86' and self.bits == 64 and self.os in ['windows'\n , 'linux']\n\n def handles_vulkan(self):\n return False\n\n def handles_webgpu(self):\n return self.os == 'osx' and self.halide_branch not in [\n HALIDE_RELEASE_15]\n\n def has_tflite(self):\n if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':\n return True\n if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':\n return True\n return False\n\n def has_ccache(self):\n return self.os in ['osx', 'linux']\n\n def halide_target(self):\n return '%s-%d-%s' % (self.arch, self.bits, self.os)\n\n def 


def add_env_setup_step(factory, builder_type, enable_ccache=False):
    if builder_type.os == 'windows':
        get_msvc_config_steps(factory, builder_type)
    cxx = 'c++'
    cc = 'cc'
    ld = 'ld'
    if builder_type.os == 'linux':
        cc = 'gcc-9'
        cxx = 'g++-9'
        ld = 'ld'
        if builder_type.arch == 'x86' and builder_type.bits == 32:
            cxx += ' -m32'
            cc += ' -m32'
            ld += ' -melf_i386'
    elif builder_type.os == 'windows':
        cxx = 'cl.exe'
        cc = 'cl.exe'
    if enable_ccache and builder_type.has_ccache():
        cxx = 'ccache ' + cxx
        cc = 'ccache ' + cc
    env = {'CC': cc, 'CXX': cxx, 'LD': ld}
    factory.addStep(SetPropertiesFromEnv(name='Read worker environment',
                                         variables=['EMSDK', 'HALIDE_NODE_JS_PATH',
                                                    'HL_HEXAGON_TOOLS',
                                                    'HL_WEBGPU_NATIVE_LIB',
                                                    'HL_WEBGPU_NODE_BINDINGS',
                                                    'LD_LIBRARY_PATH', 'VIRTUAL_ENV',
                                                    'VCPKG_ROOT']))
    vcpkg_root = Property('VCPKG_ROOT', default=None)
    if builder_type.handles_hexagon():
        hexagon_remote_bin = get_halide_build_path('src', 'runtime', 'hexagon_remote')
        env['HL_HEXAGON_SIM_REMOTE'] = Transform(os.path.join, hexagon_remote_bin,
                                                 'hexagon', 'bin', 'hexagon_sim_remote')
        env['HL_HEXAGON_SIM_CYCLES'] = '1'
        env['LD_LIBRARY_PATH'] = [hexagon_remote_bin,
                                  Interpolate('%(prop:HL_HEXAGON_TOOLS)s/lib/iss')]
        env['HEXAGON_SDK_ROOT'] = Interpolate('%(prop:HL_HEXAGON_TOOLS)s/../../../..')
    if builder_type.has_nvidia() and builder_type.handles_vulkan():
        env['VK_INSTANCE_LAYERS'] = 'VK_LAYER_KHRONOS_validation'
    if builder_type.os == 'osx':
        env['METAL_DEVICE_WRAPPER_TYPE'] = '1'
    if builder_type.os == 'windows':
        vcpkg_root = Property('VCPKG_ROOT', default='C:/vcpkg')
        env['VCPKG_ROOT'] = vcpkg_root
    env['CUDA_CACHE_DISABLE'] = '1'
    env['HL_DISABLE_WINDOWS_ABORT_DIALOG'] = '1'
    factory.addStep(SetProperties(name='Initialize environment',
                                  properties=dict(env=extend_property('env', **env),
                                                  VCPKG_ROOT=vcpkg_root)))


@renderer
def get_llvm_latest_commit(props):
    build_dir = props.getProperty('builddir')
    assert not isinstance(build_dir, dict)
    build_dir = build_dir.replace('\\', '/')
    return ('cd %s/llvm-project && git log -1 > %s/llvm-install/llvm_latest_commit.txt'
            % (build_dir, build_dir))


<mask token>
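
# Hedged example of the compiler selection above: a linux x86-32 builder ends
# up with env == {'CC': 'gcc-9 -m32', 'CXX': 'g++-9 -m32', 'LD': 'ld -melf_i386'}
# (plus the Hexagon and vcpkg extras when applicable) before the
# 'Initialize environment' step publishes it as a build property.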


def add_halide_cmake_build_steps(factory, builder_type):
    source_dir = get_halide_source_path()
    build_dir = get_halide_build_path()
    install_dir = get_halide_install_path(builder_type)
    factory.addStep(RemoveDirectory(name='Remove Halide Build Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=build_dir, haltOnFailure=False))
    factory.addStep(MakeDirectory(name='Make Halide Build Dir',
                                  locks=[performance_lock.access('counting')],
                                  dir=build_dir, haltOnFailure=False))
    factory.addStep(RemoveDirectory(name='Remove Halide Install Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=install_dir, haltOnFailure=False))
    factory.addStep(MakeDirectory(name='Make Halide Install Dir',
                                  locks=[performance_lock.access('counting')],
                                  dir=install_dir, haltOnFailure=False))
    factory.addStep(CMake(name='Configure Halide', description='Configure Halide',
                          locks=[performance_lock.access('counting')],
                          haltOnFailure=True, workdir=build_dir,
                          env=Property('env'), path=source_dir,
                          generator=get_cmake_generator(builder_type),
                          definitions=get_halide_cmake_definitions(builder_type),
                          options=get_halide_cmake_options(builder_type, build_dir)))
    factory.addStep(ShellCommand(name='Build Halide', description='Build Halide',
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True, workdir=build_dir,
                                 env=Property('env'),
                                 command=get_cmake_build_command(
                                     builder_type, build_dir,
                                     targets=['all', 'install'])))


def add_halide_cmake_package_steps(factory, builder_type):
    source_dir = get_halide_source_path()
    target = builder_type.halide_target()
    ext = 'zip' if builder_type.os == 'windows' else 'tar.gz'
    factory.addStep(SetPropertiesFromCMakeCache(name='Get Halide package version',
                                                workdir=get_halide_build_path(),
                                                props=['CMAKE_PROJECT_VERSION']))
    extra_env = dict(Clang_DIR=get_llvm_install_path(builder_type, 'lib/cmake/clang'),
                     LLD_DIR=get_llvm_install_path(builder_type, 'lib/cmake/lld'),
                     LLVM_DIR=get_llvm_install_path(builder_type, 'lib/cmake/llvm'),
                     Halide_VERSION=Property('CMAKE_PROJECT_VERSION'))
    if builder_type.os == 'windows':
        build_dir = get_halide_build_path('packaging_dir')
        if builder_type.arch == 'arm':
            arch = 'ARM' if builder_type.bits == 32 else 'ARM64'
        else:
            arch = 'Win32' if builder_type.bits == 32 else 'x64'
        cmd = [get_halide_source_path('packaging/zip/package.bat'),
               source_dir, build_dir, arch]
    else:
        build_dir = get_halide_build_path()
        cmd = [get_halide_source_path('packaging/tgz/package.sh'),
               source_dir, build_dir]
    if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':
        extra_env['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path(
            'cmake', 'toolchain.linux-arm32.cmake')
    factory.addStep(ShellCommand(name='Package Halide', description='Package Halide',
                                 workdir=build_dir,
                                 env=extend_property('env', **extra_env),
                                 locks=[performance_lock.access('counting')],
                                 haltOnFailure=True, command=cmd))
    factory.addStep(FileUpload(name='Upload Halide package',
                               workersrc=Interpolate(
                                   f'Halide-%(prop:CMAKE_PROJECT_VERSION)s-{target}.{ext}'),
                               locks=[performance_lock.access('counting')],
                               haltOnFailure=True, workdir=build_dir, mode=0o644,
                               masterdest=get_distrib_name(
                                   Property('CMAKE_PROJECT_VERSION'), target, ext)))

    def pkg_version_and_target(path: Path):
        match = re.match(r'^(.*)-[a-f0-9]+\.(tar\.gz|tgz|zip)', path.name)
        return match.group(1) if match else None

    factory.addStep(CleanOldFiles(name='Clean old releases', workdir=ARTIFACTS_DIR,
                                  locks=[performance_lock.access('counting')],
                                  groupfn=pkg_version_and_target))
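
# Hedged example of the grouping key above: for a hypothetical artifact named
# 'Halide-16.0.0-x86-64-linux-abc123f.tar.gz', pkg_version_and_target returns
# 'Halide-16.0.0-x86-64-linux', so CleanOldFiles can group uploads per
# version/target pair and discard the stale ones.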


def get_gpu_dsp_targets(builder_type):
    if builder_type.sanitizer_preset() is not None:
        return
    if builder_type.has_nvidia():
        yield 'host-cuda', False
        yield 'host-opencl', False
        if builder_type.handles_vulkan():
            yield 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13', False
    if builder_type.handles_webgpu():
        yield 'host-webgpu', False
    if builder_type.os == 'osx':
        yield 'host-metal', False
    if builder_type.handles_hexagon():
        yield 'host-hvx', True


def get_test_labels(builder_type):
    targets = defaultdict(list)
    preset = builder_type.sanitizer_preset()
    if preset and 'fuzz' in preset:
        targets['host'].extend(['fuzz'])
        return targets
    targets['host'].extend(['internal', 'correctness', 'generator',
                            'autoschedulers_cpu', 'error', 'warning', 'apps',
                            'performance', 'tutorial'])
    if preset:
        return targets
    if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':
        targets['host'].remove('internal')
        targets['host'].remove('generator')
    if builder_type.handles_python():
        targets['host'].extend(['python'])
    if builder_type.arch == 'x86':
        t = 'x86-%d-%s' % (builder_type.bits, builder_type.os)
        targets[t].extend(['correctness'])
        if builder_type.bits == 64:
            targets['%s-sse41' % t].extend(['correctness'])
    for t, is_simulator in get_gpu_dsp_targets(builder_type):
        if t == 'host-webgpu':
            targets[t].extend(['correctness', 'generator'])
        else:
            targets[t].extend(['correctness', 'generator', 'apps'])
        if 'cuda' in t:
            targets[t].extend(['autoschedulers_cuda'])
        if 'hvx' not in t:
            targets[t].extend(['autoschedulers_gpu'])
        if not is_simulator:
            targets[t].extend(['performance'])
    if builder_type.has_nvidia():
        targets['host-cuda-opencl'].extend(['correctness_multi_gpu'])
    if builder_type.handles_vulkan():
        targets['host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13'].extend(
            ['internal', 'correctness', 'generator', 'error', 'warning'])
    if builder_type.handles_wasm():
        if builder_type.handles_wasm_wabt():
            targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/wabt'].extend(
                ['internal', 'correctness', 'generator', 'error', 'warning'])
        if builder_type.handles_wasm_v8():
            targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/v8'].extend(
                ['internal', 'correctness', 'generator', 'error', 'warning'])
        targets['wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int-wasm_threads'].extend(
            ['generator', 'apps'])
        if builder_type.handles_webgpu():
            targets['wasm-32-wasmrt-webgpu'].extend(['generator'])
    return targets


<mask token>
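
# Hedged sketch (trimmed) of a get_test_labels result for a hypothetical
# linux-x64 builder with an NVIDIA GPU:
#   {'host': ['internal', 'correctness', 'generator', ..., 'python'],
#    'x86-64-linux': ['correctness'],
#    'x86-64-linux-sse41': ['correctness'],
#    'host-cuda': ['correctness', 'generator', 'apps', 'autoschedulers_cuda',
#                  'autoschedulers_gpu', 'performance'],
#    'host-cuda-opencl': ['correctness_multi_gpu'],
#    ...}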


def short_target(halide_target):
    s = halide_target.split('-')
    if len(s) == 1:
        return s[0]
    elif len(s) == 2:
        return '%s-%s' % (s[0], s[1])
    elif len(s) == 3:
        return '%s-%s-%s' % (s[0], s[1], s[2])
    elif len(s) > 3:
        return '%s-%s-%s…' % (s[0], s[1], s[2])
    else:
        return '<unknown>'


<mask token>
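
# Examples (doctest-style) of the truncation above:
#   short_target('host')               -> 'host'
#   short_target('x86-64-linux')       -> 'x86-64-linux'
#   short_target('x86-64-linux-sse41') -> 'x86-64-linux…'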


def create_halide_make_factory(builder_type):
    assert builder_type.os != 'windows'
    make_threads = Property('WORKER_BUILD_PARALLELISM')
    build_dir = get_halide_build_path()
    factory = BuildFactory()
    add_env_setup_step(factory, builder_type, enable_ccache=True)
    add_get_halide_source_steps(factory, builder_type)
    factory.addStep(RemoveDirectory(name='Remove Halide Build Dir',
                                    locks=[performance_lock.access('counting')],
                                    dir=build_dir))
    target_label_pairs = [('host', 'build_tests')]
    for halide_target, labels_for_target in get_test_labels(builder_type).items():
        if halide_target != 'host':
            continue
        _labels_to_skip = ['autoschedulers_cpu', 'autoschedulers_gpu',
                           'autoschedulers_cuda', 'performance', 'python']
        if builder_type.bits == 32:
            _labels_to_skip.extend(['autoschedulers_cpu', 'autoschedulers_gpu',
                                    'autoschedulers_cuda'])
        for label in labels_for_target:
            if label in _labels_to_skip:
                continue
            target_label_pairs.append((halide_target, label))
    for halide_target, label in target_label_pairs:
        env = extend_property('env',
                              LLVM_CONFIG=get_llvm_install_path(builder_type,
                                                                'bin/llvm-config'),
                              HL_TARGET=halide_target,
                              HL_JIT_TARGET=halide_target)
        if is_time_critical_test(label):
            p = 1
            lock_mode = 'exclusive'
        else:
            p = make_threads
            lock_mode = 'counting'
        if label != 'build_tests':
            label = 'test_%s' % label
        factory.addStep(ShellCommand(name='make ' + label,
                                     description=label + ' ' + halide_target,
                                     locks=[performance_lock.access(lock_mode)],
                                     workdir=build_dir, env=env, haltOnFailure=False,
                                     command=['make', '-f',
                                              get_halide_source_path('Makefile'),
                                              '-j', p, label],
                                     timeout=3600))
    return factory


def create_halide_cmake_factory(builder_type):
    factory = BuildFactory()
    add_env_setup_step(factory, builder_type)
    add_get_halide_source_steps(factory, builder_type)
    add_halide_cmake_build_steps(factory, builder_type)
    add_halide_cmake_test_steps(factory, builder_type)
    if builder_type.purpose == Purpose.halide_nightly:
        add_halide_cmake_package_steps(factory, builder_type)
    return factory


<mask token>


def create_halide_builder(arch, bits, os, halide_branch, llvm_branch, purpose,
                          buildsystem=BuildSystem.cmake):
    sanitizers = [None]
    if purpose != Purpose.halide_nightly:
        sanitizers.extend(_SANITIZERS)
    for san in sanitizers:
        builder_type = BuilderType(arch, bits, os, halide_branch, llvm_branch,
                                   purpose, san, buildsystem)
        if san and purpose == Purpose.llvm_nightly:
            continue
        if san and not builder_type.handles_sanitizers():
            continue
        workers = builder_type.get_worker_names()
        builder = BuilderConfig(name=builder_type.builder_label(),
                                workernames=workers,
                                factory=create_halide_factory(builder_type),
                                collapseRequests=True,
                                locks=[llvm_build_locks[llvm_branch + str(bits)]
                                       .access('counting')],
                                tags=builder_type.builder_tags())
        builder.builder_type = builder_type
        yield builder


def create_halide_builders():
    for arch, bits, os in get_interesting_halide_targets():
        for halide_branch in HALIDE_NIGHTLIES:
            for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
                yield from create_halide_builder(arch, bits, os, halide_branch,
                                                 llvm_branch, Purpose.halide_nightly)
        for halide_branch in _HALIDE_RELEASES:
            for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:
                yield from create_halide_builder(arch, bits, os, halide_branch,
                                                 llvm_branch, Purpose.halide_testbranch)
        yield from create_halide_builder(arch, bits, os, HALIDE_MAIN, LLVM_MAIN,
                                         Purpose.halide_testbranch)
    yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN, LLVM_MAIN,
                                     Purpose.halide_testbranch, BuildSystem.make)
    yield from create_halide_builder('x86', 32, 'linux', HALIDE_MAIN, LLVM_MAIN,
                                     Purpose.halide_testbranch, BuildSystem.make)
    yield from create_halide_builder('x86', 64, 'osx', HALIDE_MAIN, LLVM_MAIN,
                                     Purpose.halide_testbranch, BuildSystem.make)
    yield from create_halide_builder('arm', 64, 'osx', HALIDE_MAIN, LLVM_MAIN,
                                     Purpose.halide_testbranch, BuildSystem.make)
    for llvm_branch in LLVM_BRANCHES:
        if abs(LLVM_BRANCHES[llvm_branch].version.major -
               LLVM_BRANCHES[LLVM_MAIN].version.major) in [1, 2]:
            yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,
                                             llvm_branch, Purpose.halide_testbranch)
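
# Hedged example: for a single (arch, bits, os) triple and one test-branch
# configuration, create_halide_builder can yield several BuilderConfigs (the
# plain build plus one per sanitizer that passes handles_sanitizers()), all
# sharing the counting lock for that LLVM branch/bits combination so they can
# run concurrently with each other but not with an exclusive LLVM build.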


def create_halide_scheduler(halide_branch):

    def is_halide_base_branch(br):
        return any(br == hl.ref for hl in HALIDE_BRANCHES.values())

    def is_halide_pr_branch(br):
        return not is_halide_base_branch(br)

    def github_base_branch_matches(change):
        ref = change.properties.getProperty('basename')
        return ref == HALIDE_BRANCHES[halide_branch].ref

    builders = [b for b in c['builders']
                if b.builder_type.halide_branch == halide_branch and
                b.builder_type.purpose == Purpose.halide_nightly]
    if builders:
        builder_names = [str(b.name) for b in builders]
        yield Nightly(name='halide-package-' + halide_branch, codebases=['halide'],
                      builderNames=builder_names,
                      change_filter=ChangeFilter(codebase='halide'),
                      hour=4, minute=0)
        yield ForceScheduler(name='force-halide-nightly-' + halide_branch,
                             builderNames=builder_names, codebases=['halide'])
    builders = [b for b in c['builders']
                if b.builder_type.halide_branch == halide_branch and
                b.builder_type.purpose == Purpose.halide_testbranch]
    if builders:
        builder_names = [str(b.name) for b in builders]
        yield AnyBranchScheduler(name='halide-testbranch-' + halide_branch,
                                 codebases=['halide'],
                                 change_filter=ChangeFilter(
                                     category='pull', codebase='halide',
                                     branch_fn=is_halide_pr_branch,
                                     filter_fn=github_base_branch_matches),
                                 treeStableTimer=60 * 5,
                                 builderNames=builder_names)
        yield ForceScheduler(name='force-halide-testbranch-' + halide_branch,
                             builderNames=builder_names, codebases=['halide'])


<mask token>


def create_llvm_builders():
    for arch, bits, os in get_interesting_halide_targets():
        for llvm_branch in LLVM_BRANCHES:
            builder_type = BuilderType(arch, bits, os, None, llvm_branch,
                                       Purpose.llvm_nightly)
            for w in builder_type.get_worker_names():
                label = builder_type.llvm_builder_label()
                builder = BuilderConfig(name='%s/%s' % (label, w),
                                        workerbuilddir=label, workernames=[w],
                                        factory=create_llvm_cmake_factory(builder_type),
                                        collapseRequests=True,
                                        locks=[llvm_build_locks[llvm_branch + str(bits)]
                                               .access('exclusive')],
                                        tags=builder_type.builder_tags())
                builder.builder_type = builder_type
                yield builder


<mask token>
"step-4": "<mask token>\n\n\ndef codebase_generator(chdict):\n repo = chdict['repository']\n assert repo in all_repositories, 'Codebase not found for chdict: ' + str(\n chdict)\n return all_repositories[repo]\n\n\n<mask token>\n\n\nclass Purpose(Enum):\n halide_nightly = 1\n halide_testbranch = 2\n llvm_nightly = 3\n\n\nclass BuildSystem(Enum):\n make = 0\n cmake = 1\n\n\nclass BuilderType:\n \"\"\"A class to encapsulate the settings for a specific Builder.\n (Do not confuse with CMake's 'BUILD_TYPE', which is something else.)\n\n It includes:\n - Halide 'target' in the form of arch-bits-os\n - LLVM branch to be used\n - CMake vs Make\n - halide-nightly vs halide-testbranch vs llvm-nightly\n - sanitizers vs none\n\n It doesn't currently include any 'features' because we don't currently\n bake any in at build time.\n\n It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),\n mainly because we currently never test with multiple compilers for a given\n setup. (If we ever need to do so, compiler should be added to this.)\n \"\"\"\n\n def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose,\n sanitizer=None, buildsystem=BuildSystem.cmake):\n assert arch in ['arm', 'x86']\n assert bits in [32, 64]\n assert os in ['linux', 'windows', 'osx']\n assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'\n self.arch = arch\n self.bits = bits\n self.os = os\n self.halide_branch = halide_branch\n self.llvm_branch = llvm_branch\n self.buildsystem = buildsystem\n self.purpose = purpose\n self.sanitizer = sanitizer\n if self.halide_branch:\n assert self.purpose != Purpose.llvm_nightly\n assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'\n assert self.purpose == Purpose.halide_testbranch or self.llvm_branch in LLVM_FOR_HALIDE[\n self.halide_branch]\n else:\n assert self.purpose == Purpose.llvm_nightly\n if self.sanitizer:\n assert self.sanitizer in _SANITIZERS\n\n def handles_python(self):\n if self.bits == 32:\n return False\n if self.arch == 'arm' and self.os == 'linux':\n return False\n if self.sanitizer_preset() is not None:\n return False\n return True\n\n def handles_sanitizers(self):\n if self.buildsystem != BuildSystem.cmake:\n return False\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def sanitizer_preset(self):\n if self.handles_sanitizers():\n if self.sanitizer == 'asan':\n return 'linux-x64-asan'\n if self.sanitizer == 'fuzzer':\n return 'linux-x64-fuzzer'\n return None\n\n def handles_riscv(self):\n return self.llvm_branch not in [LLVM_RELEASE_15]\n\n def handles_hexagon(self):\n return (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux' and self.llvm_branch == LLVM_MAIN)\n\n def handles_wasm(self):\n is_linux_x64 = (self.arch == 'x86' and self.bits == 64 and self.os ==\n 'linux')\n return self.llvm_branch == LLVM_MAIN and (is_linux_x64 or self.os ==\n 'osx')\n\n def handles_wasm_wabt(self):\n return self.handles_wasm()\n\n def handles_wasm_v8(self):\n return self.handles_wasm() and self.os == 'linux'\n\n def has_nvidia(self):\n return self.arch == 'x86' and self.bits == 64 and self.os in ['windows'\n , 'linux']\n\n def handles_vulkan(self):\n return False\n\n def handles_webgpu(self):\n return self.os == 'osx' and self.halide_branch not in [\n HALIDE_RELEASE_15]\n\n def has_tflite(self):\n if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':\n return True\n if self.arch == 'arm' and self.bits == 64 and self.os == 


class SafeGitHubEventHandler(GitHubEventHandler):

    def handle_push(self, payload, event):
        ref = payload['ref']
        if re.match(r'^refs/(heads|tags)/(master|main|release/\d+\.x)$', ref):
            return super().handle_push(payload, event)
        else:
            print(f'SafeGitHubEventHandler: ignoring push event for ref: {ref}')
            return self.skip()

    def handle_pull_request(self, payload, event):
        pr = payload['pull_request']
        try:
            if any(label['name'] == 'skip_buildbots' for label in pr['labels']):
                return self.skip()
            if any(r['login'] == 'halidebuildbots' for r in pr['requested_reviewers']):
                if payload['action'] == 'review_requested':
                    payload['action'] = 'synchronize'
                return super().handle_pull_request(payload, event)
            trusted_repos = ('halide/Halide', 'CodeLinaro/Halide')
            if pr['head']['repo']['full_name'] not in trusted_repos:
                return self.skip()
            return super().handle_pull_request(payload, event)
        except KeyError as e:
            print(f'SafeGitHubEventHandler: malformed payload: {payload}')
            print(f'SafeGitHubEventHandler: missing key "{e}"')
            return self.skip()

    @staticmethod
    def skip():
        return [], 'git'


<mask token>
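
# Hedged examples of the push filter above:
#   'refs/heads/main'        -> handled
#   'refs/tags/release/16.x' -> handled ('release/16.x' matches release/\d+\.x)
#   'refs/heads/my-feature'  -> ignored (skip() reports no changes)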


def codebase_generator(chdict):
    repo = chdict['repository']
    assert repo in all_repositories, 'Codebase not found for chdict: ' + str(chdict)
    return all_repositories[repo]


<mask token>
cmake_definitions['NODE_JS_EXECUTABLE'] = Property(\n 'HALIDE_NODE_JS_PATH')\n if wasm_jit == 'v8':\n cmake_definitions['WITH_WABT'] = 'OFF'\n cmake_definitions['WITH_V8'] = 'ON'\n cmake_definitions['V8_INCLUDE_PATH'\n ] = '/home/halidenightly/v8/v8/include'\n cmake_definitions['V8_LIB_PATH'] = (\n '/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a'\n )\n elif wasm_jit == 'wabt':\n cmake_definitions['WITH_WABT'] = 'ON'\n cmake_definitions['WITH_V8'] = 'OFF'\n cmake_definitions['V8_INCLUDE_PATH'] = ''\n cmake_definitions['V8_LIB_PATH'] = ''\n else:\n assert False, 'Unknown wasm jit ' + str(wasm_jit)\n if builder_type.handles_webgpu() and 'webgpu' in halide_target:\n cmake_definitions['WEBGPU_NODE_BINDINGS'] = Property(\n 'HL_WEBGPU_NODE_BINDINGS')\n cmake_definitions['WEBGPU_NATIVE_LIB'] = Property(\n 'HL_WEBGPU_NATIVE_LIB')\n if builder_type.handles_hexagon() and 'hvx' in halide_target:\n cmake_definitions['Halide_BUILD_HEXAGON_REMOTE_RUNTIME'] = 'ON'\n return cmake_definitions\n\n\ndef get_cmake_build_command(builder_type, build_dir, targets=None):\n cmd = ['ninja', '-C', build_dir, '-j', Property('WORKER_BUILD_PARALLELISM')\n ]\n if builder_type.os == 'windows':\n cmd.append('-v')\n if targets:\n cmd.extend(targets)\n return cmd\n\n\ndef get_llvm_cmake_definitions(builder_type):\n definitions = {'CMAKE_BUILD_TYPE': 'Release', 'CMAKE_INSTALL_PREFIX':\n get_llvm_install_path(builder_type), 'LLVM_BUILD_32_BITS': 'ON' if \n builder_type.bits == 32 else 'OFF', 'LLVM_ENABLE_ASSERTIONS': 'ON',\n 'LLVM_ENABLE_BINDINGS': 'OFF', 'LLVM_ENABLE_CURL': 'OFF',\n 'LLVM_ENABLE_DIA_SDK': 'OFF', 'LLVM_ENABLE_HTTPLIB': 'OFF',\n 'LLVM_ENABLE_IDE': 'OFF', 'LLVM_ENABLE_LIBXML2': 'OFF',\n 'LLVM_ENABLE_OCAMLDOC': 'OFF', 'LLVM_ENABLE_RTTI': 'ON',\n 'LLVM_ENABLE_TERMINFO': 'OFF', 'LLVM_ENABLE_WARNINGS': 'OFF',\n 'LLVM_ENABLE_ZLIB': 'ON', 'LLVM_ENABLE_ZSTD': 'OFF',\n 'LLVM_INCLUDE_BENCHMARKS': 'OFF', 'LLVM_INCLUDE_EXAMPLES': 'OFF',\n 'LLVM_INCLUDE_TESTS': 'OFF', 'LLVM_TARGETS_TO_BUILD':\n 'X86;ARM;NVPTX;AArch64;Hexagon;PowerPC;WebAssembly'}\n if builder_type.bits == 32:\n definitions['CMAKE_FIND_ROOT_PATH_MODE_INCLUDE'] = 'ONLY'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_PACKAGE'] = 'ONLY'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_PROGRAM'] = 'NEVER'\n if builder_type.handles_riscv():\n definitions['LLVM_TARGETS_TO_BUILD'] += ';RISCV'\n if builder_type.handles_sanitizers():\n definitions['LLVM_ENABLE_RUNTIMES'\n ] = 'compiler-rt;libcxx;libcxxabi;libunwind'\n definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld;clang-tools-extra'\n else:\n definitions['LLVM_ENABLE_PROJECTS'] = 'clang;lld'\n if builder_type.os != 'windows':\n definitions['CMAKE_CXX_FLAGS'] = '-Wno-psabi'\n if (builder_type.arch == 'arm' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n definitions['CMAKE_TOOLCHAIN_FILE'] = get_llvm_toolchains_path(\n 'toolchain.linux-arm32.cmake')\n definitions['LLVM_TARGET_ARCH'] = 'ARM'\n definitions['LLVM_DEFAULT_TARGET_TRIPLE'] = 'arm-linux-gnueabihf'\n if (builder_type.arch == 'x86' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n definitions['CMAKE_FIND_ROOT_PATH'] = '/usr/lib/i386-linux-gnu'\n definitions['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'\n if builder_type.os == 'osx':\n definitions['LLVM_ENABLE_SUPPORT_XCODE_SIGNPOSTS'] = 'FORCE_OFF'\n if builder_type.has_ccache():\n definitions['LLVM_CCACHE_BUILD'] = 'ON'\n return definitions\n\n\ndef extend_property(dict_name, **kwargs):\n\n @renderer\n def 
render(props):\n table = props.getProperty(dict_name, default={})\n table.update(kwargs)\n return table\n return render\n\n\ndef add_env_setup_step(factory, builder_type, enable_ccache=False):\n if builder_type.os == 'windows':\n get_msvc_config_steps(factory, builder_type)\n cxx = 'c++'\n cc = 'cc'\n ld = 'ld'\n if builder_type.os == 'linux':\n cc = 'gcc-9'\n cxx = 'g++-9'\n ld = 'ld'\n if builder_type.arch == 'x86' and builder_type.bits == 32:\n cxx += ' -m32'\n cc += ' -m32'\n ld += ' -melf_i386'\n elif builder_type.os == 'windows':\n cxx = 'cl.exe'\n cc = 'cl.exe'\n if enable_ccache and builder_type.has_ccache():\n cxx = 'ccache ' + cxx\n cc = 'ccache ' + cc\n env = {'CC': cc, 'CXX': cxx, 'LD': ld}\n factory.addStep(SetPropertiesFromEnv(name='Read worker environment',\n variables=['EMSDK', 'HALIDE_NODE_JS_PATH', 'HL_HEXAGON_TOOLS',\n 'HL_WEBGPU_NATIVE_LIB', 'HL_WEBGPU_NODE_BINDINGS',\n 'LD_LIBRARY_PATH', 'VIRTUAL_ENV', 'VCPKG_ROOT']))\n vcpkg_root = Property('VCPKG_ROOT', default=None)\n if builder_type.handles_hexagon():\n hexagon_remote_bin = get_halide_build_path('src', 'runtime',\n 'hexagon_remote')\n env['HL_HEXAGON_SIM_REMOTE'] = Transform(os.path.join,\n hexagon_remote_bin, 'hexagon', 'bin', 'hexagon_sim_remote')\n env['HL_HEXAGON_SIM_CYCLES'] = '1'\n env['LD_LIBRARY_PATH'] = [hexagon_remote_bin, Interpolate(\n '%(prop:HL_HEXAGON_TOOLS)s/lib/iss')]\n env['HEXAGON_SDK_ROOT'] = Interpolate(\n '%(prop:HL_HEXAGON_TOOLS)s/../../../..')\n if builder_type.has_nvidia() and builder_type.handles_vulkan():\n env['VK_INSTANCE_LAYERS'] = 'VK_LAYER_KHRONOS_validation'\n if builder_type.os == 'osx':\n env['METAL_DEVICE_WRAPPER_TYPE'] = '1'\n if builder_type.os == 'windows':\n vcpkg_root = Property('VCPKG_ROOT', default='C:/vcpkg')\n env['VCPKG_ROOT'] = vcpkg_root\n env['CUDA_CACHE_DISABLE'] = '1'\n env['HL_DISABLE_WINDOWS_ABORT_DIALOG'] = '1'\n factory.addStep(SetProperties(name='Initialize environment', properties\n =dict(env=extend_property('env', **env), VCPKG_ROOT=vcpkg_root)))\n\n\n@renderer\ndef get_llvm_latest_commit(props):\n build_dir = props.getProperty('builddir')\n assert not isinstance(build_dir, dict)\n build_dir = build_dir.replace('\\\\', '/')\n return (\n 'cd %s/llvm-project && git log -1 > %s/llvm-install/llvm_latest_commit.txt'\n % (build_dir, build_dir))\n\n\n<mask token>\n\n\ndef add_halide_cmake_build_steps(factory, builder_type):\n source_dir = get_halide_source_path()\n build_dir = get_halide_build_path()\n install_dir = get_halide_install_path(builder_type)\n factory.addStep(RemoveDirectory(name='Remove Halide Build Dir', locks=[\n performance_lock.access('counting')], dir=build_dir, haltOnFailure=\n False))\n factory.addStep(MakeDirectory(name='Make Halide Build Dir', locks=[\n performance_lock.access('counting')], dir=build_dir, haltOnFailure=\n False))\n factory.addStep(RemoveDirectory(name='Remove Halide Install Dir', locks\n =[performance_lock.access('counting')], dir=install_dir,\n haltOnFailure=False))\n factory.addStep(MakeDirectory(name='Make Halide Install Dir', locks=[\n performance_lock.access('counting')], dir=install_dir,\n haltOnFailure=False))\n factory.addStep(CMake(name='Configure Halide', description=\n 'Configure Halide', locks=[performance_lock.access('counting')],\n haltOnFailure=True, workdir=build_dir, env=Property('env'), path=\n source_dir, generator=get_cmake_generator(builder_type),\n definitions=get_halide_cmake_definitions(builder_type), options=\n get_halide_cmake_options(builder_type, build_dir)))\n 
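# Configure and build run as separate steps, so a CMake configure failure\n    # shows up as its own step in the build results rather than being folded\n    # into the compile output.\n    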
factory.addStep(ShellCommand(name='Build Halide', description=\n 'Build Halide', locks=[performance_lock.access('counting')],\n haltOnFailure=True, workdir=build_dir, env=Property('env'), command\n =get_cmake_build_command(builder_type, build_dir, targets=['all',\n 'install'])))\n\n\ndef add_halide_cmake_package_steps(factory, builder_type):\n source_dir = get_halide_source_path()\n target = builder_type.halide_target()\n ext = 'zip' if builder_type.os == 'windows' else 'tar.gz'\n factory.addStep(SetPropertiesFromCMakeCache(name=\n 'Get Halide package version', workdir=get_halide_build_path(),\n props=['CMAKE_PROJECT_VERSION']))\n extra_env = dict(Clang_DIR=get_llvm_install_path(builder_type,\n 'lib/cmake/clang'), LLD_DIR=get_llvm_install_path(builder_type,\n 'lib/cmake/lld'), LLVM_DIR=get_llvm_install_path(builder_type,\n 'lib/cmake/llvm'), Halide_VERSION=Property('CMAKE_PROJECT_VERSION'))\n if builder_type.os == 'windows':\n build_dir = get_halide_build_path('packaging_dir')\n if builder_type.arch == 'arm':\n arch = 'ARM' if builder_type.bits == 32 else 'ARM64'\n else:\n arch = 'Win32' if builder_type.bits == 32 else 'x64'\n cmd = [get_halide_source_path('packaging/zip/package.bat'),\n source_dir, build_dir, arch]\n else:\n build_dir = get_halide_build_path()\n cmd = [get_halide_source_path('packaging/tgz/package.sh'),\n source_dir, build_dir]\n if (builder_type.arch == 'arm' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n extra_env['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake',\n 'toolchain.linux-arm32.cmake')\n factory.addStep(ShellCommand(name='Package Halide', description=\n 'Package Halide', workdir=build_dir, env=extend_property('env', **\n extra_env), locks=[performance_lock.access('counting')],\n haltOnFailure=True, command=cmd))\n factory.addStep(FileUpload(name='Upload Halide package', workersrc=\n Interpolate(f'Halide-%(prop:CMAKE_PROJECT_VERSION)s-{target}.{ext}'\n ), locks=[performance_lock.access('counting')], haltOnFailure=True,\n workdir=build_dir, mode=420, masterdest=get_distrib_name(Property(\n 'CMAKE_PROJECT_VERSION'), target, ext)))\n\n def pkg_version_and_target(path: Path):\n match = re.match('^(.*)-[a-f0-9]+\\\\.(tar\\\\.gz|tgz|zip)', path.name)\n return match.group(1) if match else None\n factory.addStep(CleanOldFiles(name='Clean old releases', workdir=\n ARTIFACTS_DIR, locks=[performance_lock.access('counting')], groupfn\n =pkg_version_and_target))\n\n\ndef get_gpu_dsp_targets(builder_type):\n if builder_type.sanitizer_preset() is not None:\n return\n if builder_type.has_nvidia():\n yield 'host-cuda', False\n yield 'host-opencl', False\n if builder_type.handles_vulkan():\n yield 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13', False\n if builder_type.handles_webgpu():\n yield 'host-webgpu', False\n if builder_type.os == 'osx':\n yield 'host-metal', False\n if builder_type.handles_hexagon():\n yield 'host-hvx', True\n\n\ndef get_test_labels(builder_type):\n targets = defaultdict(list)\n preset = builder_type.sanitizer_preset()\n if preset and 'fuzz' in preset:\n targets['host'].extend(['fuzz'])\n return targets\n targets['host'].extend(['internal', 'correctness', 'generator',\n 'autoschedulers_cpu', 'error', 'warning', 'apps', 'performance',\n 'tutorial'])\n if preset:\n return targets\n if (builder_type.arch == 'arm' and builder_type.bits == 32 and \n builder_type.os == 'linux'):\n targets['host'].remove('internal')\n targets['host'].remove('generator')\n if builder_type.handles_python():\n 
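# handles_python() is false for 32-bit builds, arm-linux workers, and\n        # sanitizer presets, so those configurations never schedule Python tests.\n        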
targets['host'].extend(['python'])\n if builder_type.arch == 'x86':\n t = 'x86-%d-%s' % (builder_type.bits, builder_type.os)\n targets[t].extend(['correctness'])\n if builder_type.bits == 64:\n targets['%s-sse41' % t].extend(['correctness'])\n for t, is_simulator in get_gpu_dsp_targets(builder_type):\n if t == 'host-webgpu':\n targets[t].extend(['correctness', 'generator'])\n else:\n targets[t].extend(['correctness', 'generator', 'apps'])\n if 'cuda' in t:\n targets[t].extend(['autoschedulers_cuda'])\n if 'hvx' not in t:\n targets[t].extend(['autoschedulers_gpu'])\n if not is_simulator:\n targets[t].extend(['performance'])\n if builder_type.has_nvidia():\n targets['host-cuda-opencl'].extend(['correctness_multi_gpu'])\n if builder_type.handles_vulkan():\n targets[\n 'host-vulkan-vk_int8-vk_int16-vk_int64-vk_float16-vk_float64-vk_v13'\n ].extend(['internal', 'correctness', 'generator', 'error',\n 'warning'])\n if builder_type.handles_wasm():\n if builder_type.handles_wasm_wabt():\n targets[\n 'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/wabt'\n ].extend(['internal', 'correctness', 'generator', 'error',\n 'warning'])\n if builder_type.handles_wasm_v8():\n targets[\n 'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int/v8'\n ].extend(['internal', 'correctness', 'generator', 'error',\n 'warning'])\n targets[\n 'wasm-32-wasmrt-wasm_simd128-wasm_signext-wasm_sat_float_to_int-wasm_threads'\n ].extend(['generator', 'apps'])\n if builder_type.handles_webgpu():\n targets['wasm-32-wasmrt-webgpu'].extend(['generator'])\n return targets\n\n\ndef is_time_critical_test(test):\n return test in ['performance', 'autoschedulers_cpu',\n 'autoschedulers_gpu', 'autoschedulers_cuda']\n\n\ndef short_target(halide_target):\n s = halide_target.split('-')\n if len(s) == 1:\n return s[0]\n elif len(s) == 2:\n return '%s-%s' % (s[0], s[1])\n elif len(s) == 3:\n return '%s-%s-%s' % (s[0], s[1], s[2])\n elif len(s) > 3:\n return '%s-%s-%s…' % (s[0], s[1], s[2])\n else:\n return '<unknown>'\n\n\ndef add_halide_cmake_test_steps(factory, builder_type):\n parallelism = Property('WORKER_BUILD_PARALLELISM')\n labels = get_test_labels(builder_type)\n source_dir = get_halide_source_path()\n build_dir = get_halide_build_path()\n install_dir = get_halide_install_path(builder_type)\n keys = list(labels.keys())\n keys.remove('host')\n keys.sort()\n keys.insert(0, 'host')\n for halide_target in keys:\n env = extend_property('env', HL_JIT_TARGET=halide_target)\n desc = 'T=%s' % short_target(halide_target)\n test_labels = labels[halide_target]\n wasm_jit = None\n if halide_target.startswith('wasm-'):\n halide_target, sep, wasm_jit = halide_target.partition('/')\n env = extend_property('env', HL_JIT_TARGET=halide_target)\n if wasm_jit:\n desc = '%s + T=%s' % (wasm_jit, short_target(halide_target))\n if not wasm_jit:\n wasm_jit = 'wabt'\n factory.addStep(CMake(name='Reconfigure for %s' % short_target(\n halide_target), description='Reconfigure for %s' % desc, locks=\n [performance_lock.access('counting')], haltOnFailure=True, env=\n env, workdir=build_dir, path=source_dir, generator=\n get_cmake_generator(builder_type), definitions=\n get_halide_cmake_definitions(builder_type, halide_target=\n halide_target, wasm_jit=wasm_jit), options=\n get_halide_cmake_options(builder_type, build_dir)))\n factory.addStep(ShellCommand(name='Rebuild for %s' % short_target(\n halide_target), description='Rebuild Halide for %s' % desc,\n locks=[performance_lock.access('counting')], haltOnFailure=True,\n workdir=build_dir, 
env=env, command=get_cmake_build_command(\n builder_type, build_dir, targets=['all', 'install'])))\n do_apps = 'apps' in test_labels\n if do_apps:\n test_labels.remove('apps')\n if not builder_type.handles_python():\n if 'python' in test_labels:\n test_labels.remove('python')\n do_apps = False\n parallel_test_labels = [test for test in test_labels if not\n is_time_critical_test(test)]\n exclusive_test_labels = [test for test in test_labels if\n is_time_critical_test(test)]\n if parallel_test_labels:\n if len(parallel_test_labels) > 2:\n test_set = ','.join([s[0] for s in parallel_test_labels])\n else:\n test_set = ', '.join(parallel_test_labels)\n exclude_tests = []\n if builder_type.os == 'windows' or builder_type.os == 'linux':\n exclude_tests.append('interpolate')\n exclude_tests.append('lens_blur')\n exclude_tests.append('unsharp')\n if builder_type.os == 'linux' or builder_type.bits == 32:\n exclude_tests.append('tutorial_lesson_12')\n if builder_type.sanitizer == 'asan':\n exclude_tests.append('tutorial_lesson_19')\n if builder_type.arch == 'arm' or builder_type.bits == 32:\n exclude_tests.append('tutorial_lesson_19')\n factory.addStep(CTest(name='Test %s %s' % (test_set, desc),\n description='Test %s %s' % (test_set, desc), locks=[\n performance_lock.access('counting')], workdir=build_dir,\n env=env, timeout=3600, labels=parallel_test_labels,\n exclude_tests=exclude_tests, jobs=parallelism, **\n get_ctest_options(builder_type, build_dir)))\n if exclusive_test_labels:\n test_set = ','.join([s.replace('autoschedulers_', 'a_') for s in\n exclusive_test_labels])\n factory.addStep(CTest(name='Test %s %s' % (test_set, desc),\n description='Test %s %s' % (test_set, desc), locks=[\n performance_lock.access('exclusive')], workdir=build_dir,\n env=env, timeout=3600, labels=exclusive_test_labels, **\n get_ctest_options(builder_type, build_dir)))\n if do_apps:\n apps_build_dir = get_halide_build_path('apps')\n apps_source_dir = get_halide_source_path('apps')\n apps_cmake_defs = get_halide_cmake_definitions(builder_type,\n halide_target=halide_target)\n apps_cmake_defs['CMAKE_PREFIX_PATH'] = get_halide_install_path(\n builder_type)\n want_hannk = builder_type.has_tflite(\n ) and not halide_target.startswith('wasm-')\n apps_cmake_defs['ENABLE_APPS_HANNK'\n ] = 'ON' if want_hannk else 'OFF'\n factory.addStep(CMake(name='Configure apps for %s' % desc,\n description='Configure apps for %s' % desc, locks=[\n performance_lock.access('counting')], haltOnFailure=True,\n env=env, workdir=apps_build_dir, path=apps_source_dir,\n generator=get_cmake_generator(builder_type), definitions=\n apps_cmake_defs, options=get_halide_cmake_options(\n builder_type, build_dir)))\n factory.addStep(ShellCommand(name='Build apps for %s' % desc,\n description='Build apps for %s' % desc, locks=[\n performance_lock.access('counting')], haltOnFailure=True,\n workdir=apps_build_dir, env=env, command=\n get_cmake_build_command(builder_type, apps_build_dir)))\n exclude_tests = []\n if builder_type.os == 'windows':\n exclude_tests.append('lens_blur_filter')\n factory.addStep(CTest(name='Test apps for %s' % desc,\n description='Test apps for %s' % desc, locks=[\n performance_lock.access('exclusive')], workdir=\n apps_build_dir, env=env, timeout=3600, exclude_tests=\n exclude_tests, exclude_labels=['slow_tests'], **\n get_ctest_options(builder_type, apps_build_dir)))\n\n\ndef create_halide_make_factory(builder_type):\n assert builder_type.os != 'windows'\n make_threads = Property('WORKER_BUILD_PARALLELISM')\n build_dir = 
get_halide_build_path()\n factory = BuildFactory()\n add_env_setup_step(factory, builder_type, enable_ccache=True)\n add_get_halide_source_steps(factory, builder_type)\n factory.addStep(RemoveDirectory(name='Remove Halide Build Dir', locks=[\n performance_lock.access('counting')], dir=build_dir))\n target_label_pairs = [('host', 'build_tests')]\n for halide_target, labels_for_target in get_test_labels(builder_type\n ).items():\n if halide_target != 'host':\n continue\n _labels_to_skip = ['autoschedulers_cpu', 'autoschedulers_gpu',\n 'autoschedulers_cuda', 'performance', 'python']\n if builder_type.bits == 32:\n _labels_to_skip.extend(['autoschedulers_cpu',\n 'autoschedulers_gpu', 'autoschedulers_cuda'])\n for label in labels_for_target:\n if label in _labels_to_skip:\n continue\n target_label_pairs.append((halide_target, label))\n for halide_target, label in target_label_pairs:\n env = extend_property('env', LLVM_CONFIG=get_llvm_install_path(\n builder_type, 'bin/llvm-config'), HL_TARGET=halide_target,\n HL_JIT_TARGET=halide_target)\n if is_time_critical_test(label):\n p = 1\n lock_mode = 'exclusive'\n else:\n p = make_threads\n lock_mode = 'counting'\n if label != 'build_tests':\n label = 'test_%s' % label\n factory.addStep(ShellCommand(name='make ' + label, description=\n label + ' ' + halide_target, locks=[performance_lock.access(\n lock_mode)], workdir=build_dir, env=env, haltOnFailure=False,\n command=['make', '-f', get_halide_source_path('Makefile'), '-j',\n p, label], timeout=3600))\n return factory\n\n\ndef create_halide_cmake_factory(builder_type):\n factory = BuildFactory()\n add_env_setup_step(factory, builder_type)\n add_get_halide_source_steps(factory, builder_type)\n add_halide_cmake_build_steps(factory, builder_type)\n add_halide_cmake_test_steps(factory, builder_type)\n if builder_type.purpose == Purpose.halide_nightly:\n add_halide_cmake_package_steps(factory, builder_type)\n return factory\n\n\n<mask token>\n\n\ndef create_halide_builder(arch, bits, os, halide_branch, llvm_branch,\n purpose, buildsystem=BuildSystem.cmake):\n sanitizers = [None]\n if purpose != Purpose.halide_nightly:\n sanitizers.extend(_SANITIZERS)\n for san in sanitizers:\n builder_type = BuilderType(arch, bits, os, halide_branch,\n llvm_branch, purpose, san, buildsystem)\n if san and purpose == Purpose.llvm_nightly:\n continue\n if san and not builder_type.handles_sanitizers():\n continue\n workers = builder_type.get_worker_names()\n builder = BuilderConfig(name=builder_type.builder_label(),\n workernames=workers, factory=create_halide_factory(builder_type\n ), collapseRequests=True, locks=[llvm_build_locks[llvm_branch +\n str(bits)].access('counting')], tags=builder_type.builder_tags())\n builder.builder_type = builder_type\n yield builder\n\n\ndef create_halide_builders():\n for arch, bits, os in get_interesting_halide_targets():\n for halide_branch in HALIDE_NIGHTLIES:\n for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:\n yield from create_halide_builder(arch, bits, os,\n halide_branch, llvm_branch, Purpose.halide_nightly)\n for halide_branch in _HALIDE_RELEASES:\n for llvm_branch in LLVM_FOR_HALIDE[halide_branch]:\n yield from create_halide_builder(arch, bits, os,\n halide_branch, llvm_branch, Purpose.halide_testbranch)\n yield from create_halide_builder(arch, bits, os, HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch)\n yield from create_halide_builder('x86', 64, 'linux', HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)\n yield from create_halide_builder('x86', 32, 
'linux', HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)\n yield from create_halide_builder('x86', 64, 'osx', HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)\n yield from create_halide_builder('arm', 64, 'osx', HALIDE_MAIN,\n LLVM_MAIN, Purpose.halide_testbranch, BuildSystem.make)\n for llvm_branch in LLVM_BRANCHES:\n if abs(LLVM_BRANCHES[llvm_branch].version.major - LLVM_BRANCHES[\n LLVM_MAIN].version.major) in [1, 2]:\n yield from create_halide_builder('x86', 64, 'linux',\n HALIDE_MAIN, llvm_branch, Purpose.halide_testbranch)\n\n\ndef create_halide_scheduler(halide_branch):\n\n def is_halide_base_branch(br):\n return any(br == hl.ref for hl in HALIDE_BRANCHES.values())\n\n def is_halide_pr_branch(br):\n return not is_halide_base_branch(br)\n\n def github_base_branch_matches(change):\n ref = change.properties.getProperty('basename')\n return ref == HALIDE_BRANCHES[halide_branch].ref\n builders = [b for b in c['builders'] if b.builder_type.halide_branch ==\n halide_branch and b.builder_type.purpose == Purpose.halide_nightly]\n if builders:\n builder_names = [str(b.name) for b in builders]\n yield Nightly(name='halide-package-' + halide_branch, codebases=[\n 'halide'], builderNames=builder_names, change_filter=\n ChangeFilter(codebase='halide'), hour=4, minute=0)\n yield ForceScheduler(name='force-halide-nightly-' + halide_branch,\n builderNames=builder_names, codebases=['halide'])\n builders = [b for b in c['builders'] if b.builder_type.halide_branch ==\n halide_branch and b.builder_type.purpose == Purpose.halide_testbranch]\n if builders:\n builder_names = [str(b.name) for b in builders]\n yield AnyBranchScheduler(name='halide-testbranch-' + halide_branch,\n codebases=['halide'], change_filter=ChangeFilter(category=\n 'pull', codebase='halide', branch_fn=is_halide_pr_branch,\n filter_fn=github_base_branch_matches), treeStableTimer=60 * 5,\n builderNames=builder_names)\n yield ForceScheduler(name='force-halide-testbranch-' +\n halide_branch, builderNames=builder_names, codebases=['halide'])\n\n\n<mask token>\n\n\ndef create_llvm_builders():\n for arch, bits, os in get_interesting_halide_targets():\n for llvm_branch in LLVM_BRANCHES:\n builder_type = BuilderType(arch, bits, os, None, llvm_branch,\n Purpose.llvm_nightly)\n for w in builder_type.get_worker_names():\n label = builder_type.llvm_builder_label()\n builder = BuilderConfig(name='%s/%s' % (label, w),\n workerbuilddir=label, workernames=[w], factory=\n create_llvm_cmake_factory(builder_type),\n collapseRequests=True, locks=[llvm_build_locks[\n llvm_branch + str(bits)].access('exclusive')], tags=\n builder_type.builder_tags())\n builder.builder_type = builder_type\n yield builder\n\n\n<mask token>\n\n\ndef create_builders():\n yield from create_llvm_builders()\n yield from create_halide_builders()\n\n\ndef create_schedulers():\n for llvm_branch in LLVM_BRANCHES:\n yield from create_llvm_scheduler(llvm_branch)\n for halide_branch in HALIDE_BRANCHES:\n yield from create_halide_scheduler(halide_branch)\n\n\n<mask token>\n\n\ndef prioritize_builders(buildmaster, builders):\n\n def importance(builder):\n builder_type = builder.config.builder_type\n assert builder_type\n if builder_type.purpose == Purpose.llvm_nightly:\n return 0\n if builder_type.purpose == Purpose.halide_testbranch:\n return 1\n if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_15]:\n return 2\n if builder_type.llvm_branch in LLVM_FOR_HALIDE[HALIDE_RELEASE_16]:\n return 2\n if builder_type.llvm_branch in 
LLVM_FOR_HALIDE[HALIDE_MAIN]:\n return 3\n return 4\n return list(sorted(builders, key=importance))\n\n\n<mask token>\n\n\nclass SafeGitHubEventHandler(GitHubEventHandler):\n\n def handle_push(self, payload, event):\n ref = payload['ref']\n if re.match('^refs/(heads|tags)/(master|main|release/\\\\d+\\\\.x)$', ref):\n return super().handle_push(payload, event)\n else:\n print(f'SafeGitHubEventHandler: ignoring push event for ref: {ref}'\n )\n return self.skip()\n\n def handle_pull_request(self, payload, event):\n pr = payload['pull_request']\n try:\n if any(label['name'] == 'skip_buildbots' for label in pr['labels']\n ):\n return self.skip()\n if any(r['login'] == 'halidebuildbots' for r in pr[\n 'requested_reviewers']):\n if payload['action'] == 'review_requested':\n payload['action'] = 'synchronize'\n return super().handle_pull_request(payload, event)\n trusted_repos = 'halide/Halide', 'CodeLinaro/Halide'\n if pr['head']['repo']['full_name'] not in trusted_repos:\n return self.skip()\n return super().handle_pull_request(payload, event)\n except KeyError as e:\n print(f'SafeGitHubEventHandler: malformed payload: {payload}')\n print(f'SafeGitHubEventHandler: missing key \"{e}\"')\n return self.skip()\n\n @staticmethod\n def skip():\n return [], 'git'\n\n\n<mask token>\n",
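A minimal standalone sketch (not part of the recorded config) of the push-ref
filter applied by SafeGitHubEventHandler.handle_push above; the sample refs are
invented for illustration:

import re

# Same pattern as handle_push: only pushes to master/main or release/N.x
# (branches or tags) should trigger builds; everything else is skipped.
_ALLOWED_REF = re.compile(r"^refs/(heads|tags)/(master|main|release/\d+\.x)$")

def should_build(ref):
    return bool(_ALLOWED_REF.match(ref))

assert should_build("refs/heads/main")
assert should_build("refs/heads/release/16.x")
assert not should_build("refs/heads/my-feature")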
"step-5": "# -*- python -*-\n# ex: set syntax=python:\n# vim: set syntax=python:\n\nimport os\nimport re\nfrom collections import defaultdict, namedtuple\nfrom enum import Enum\nfrom pathlib import Path\n\nimport buildbot.www.authz.endpointmatchers as ems\nfrom buildbot.changes.filter import ChangeFilter\nfrom buildbot.changes.gitpoller import GitPoller\nfrom buildbot.config import BuilderConfig\nfrom buildbot.locks import WorkerLock\nfrom buildbot.process.factory import BuildFactory\nfrom buildbot.process.properties import Interpolate, Property, renderer, Transform\nfrom buildbot.reporters.generators.build import BuildStartEndStatusGenerator\nfrom buildbot.reporters.github import GitHubStatusPush\nfrom buildbot.reporters.message import MessageFormatterRenderable\nfrom buildbot.schedulers.basic import AnyBranchScheduler\nfrom buildbot.schedulers.forcesched import ForceScheduler\nfrom buildbot.schedulers.timed import Nightly\nfrom buildbot.steps.cmake import CMake\nfrom buildbot.steps.master import SetProperties\nfrom buildbot.steps.shell import SetPropertyFromCommand, ShellCommand\nfrom buildbot.steps.source.git import Git\nfrom buildbot.steps.source.github import GitHub\nfrom buildbot.steps.transfer import FileUpload, FileDownload\nfrom buildbot.steps.worker import MakeDirectory, SetPropertiesFromEnv, RemoveDirectory\nfrom buildbot.worker import Worker\nfrom buildbot.www.auth import UserPasswordAuth\nfrom buildbot.www.authz import Authz\nfrom buildbot.www.authz.roles import RolesFromUsername\nfrom buildbot.www.hooks.github import GitHubEventHandler\nfrom twisted.internet import defer\n\nfrom custom_steps import CTest, CleanOldFiles, SetPropertiesFromCMakeCache\n\n# This is the dictionary that the buildmaster pays attention to. We also use\n# a shorter alias to save typing.\nc = BuildmasterConfig = {}\n\n# SECRETS\n\nGITHUB_TOKEN = Path('github_token.txt').read_text().strip()\nWORKER_SECRET = Path('halide_bb_pass.txt').read_text().strip()\nWEBHOOK_SECRET = Path('webhook_token.txt').read_text().strip()\nWWW_PASSWORD = Path('buildbot_www_pass.txt').read_text().strip()\n\n# SERVER SETTINGS\n\nARTIFACTS_DIR = os.environ.get('HALIDE_BB_MASTER_ARTIFACTS_DIR', '/home/halidenightly/artifacts')\nREPO_DIR = Path(__file__, '..', '..').resolve()\n\n# LLVM\n\n# At any given time, we test (at least) 3 LLVM versions:\n# - the current main (changes daily)\n# - the most recent release (expected to be stable)\n# - an older release (expected to be stable)\n#\n# The branches that correspond to these will rotate as new versions\n# are released, but the underlying test logic should not need changing.\n\nVersion = namedtuple('Version', ['major', 'minor', 'patch'])\nVersionedBranch = namedtuple('VersionedBranch', ['ref', 'version'])\n\nLLVM_MAIN = 'main'\nLLVM_RELEASE_17 = 'release_17'\nLLVM_RELEASE_16 = 'release_16'\nLLVM_RELEASE_15 = 'release_15'\n\nLLVM_BRANCHES = {LLVM_MAIN: VersionedBranch(ref='main', version=Version(18, 0, 0)),\n LLVM_RELEASE_17: VersionedBranch(ref='release/17.x', version=Version(17, 0, 0)),\n LLVM_RELEASE_16: VersionedBranch(ref='llvmorg-16.0.6', version=Version(16, 0, 6)),\n LLVM_RELEASE_15: VersionedBranch(ref='llvmorg-15.0.7', version=Version(15, 0, 7))}\n\n# At any given time, Halide has a main branch, which supports (at least)\n# the LLVM main branch and the most recent release branch (and maybe one older).\n#\n# We also support previous release branches; a release branch tracks *only* the\n# corresponding version of LLVM (i.e., Halide 13 is 'release/13.x' and is only\n# built/tested 
against LLVM13, even though it might still work with other LLVM versions).\n#\n# Note that we deliberately chose branch names that match LLVM's conventions.\n#\n# (Note that there are older releases of Halide that we no longer bother to build/test regularly.)\n\nHALIDE_MAIN = 'main'\nHALIDE_RELEASE_16 = 'release_16'\nHALIDE_RELEASE_15 = 'release_15'\n\n_HALIDE_RELEASES = [\n HALIDE_RELEASE_16,\n HALIDE_RELEASE_15,\n]\n\nHALIDE_BRANCHES = {HALIDE_MAIN: VersionedBranch(ref='main', version=Version(17, 0, 0)),\n HALIDE_RELEASE_16: VersionedBranch(ref='release/16.x', version=Version(16, 0, 6)),\n HALIDE_RELEASE_15: VersionedBranch(ref='release/15.x', version=Version(15, 0, 1))}\n\n# This lists the Halide branch(es) for which we want to build nightlies;\n# it's usually desirable to constrain these to save buildbot time (esp on the slower bots)\n# and avoid branches that aren't changing much (i.e. -- recent releases that aren't\n# likely to need new updates soon).\nHALIDE_NIGHTLIES = [HALIDE_MAIN]\n\n# Given a halide branch, return the 'native' llvm version we expect to use with it.\n# For halide release branches, this is the corresponding llvm release branch; for\n# halide main, it's llvm main.\nLLVM_FOR_HALIDE = {\n HALIDE_MAIN: [LLVM_MAIN, LLVM_RELEASE_17, LLVM_RELEASE_16],\n HALIDE_RELEASE_16: [LLVM_RELEASE_16],\n HALIDE_RELEASE_15: [LLVM_RELEASE_15],\n}\n\n# WORKERS\n\n# Can use Python 3.7 dataclasses instead, if we choose to upgrade to that.\nWorkerConfig = namedtuple('WorkerConfig', ['max_builds', 'j', 'arch', 'bits', 'os'])\n\n# Using nproc+2 on the arm32 builds causes internal errors in gcc-armeabihf. Let's just use nproc.\n_NPROC = Interpolate(\"%(worker:numcpus)s\")\n\n# For machines with max_builds=1, using nproc+2 cores for building is the conventional choice\n# (and what ninja defaults to). Oddly, \"ninja -j 0\" means \"use as many threads as you like\" which\n# is definitely not what we want.\n_NPROC_PLUS_2 = Transform(lambda x: f'{int(x) + 2}', _NPROC)\n\n_WORKERS = [\n ('linux-worker-1', WorkerConfig(max_builds=4, j=8, arch='x86', bits=[32, 64], os='linux')),\n ('linux-worker-4', WorkerConfig(max_builds=4, j=8, arch='x86', bits=[32, 64], os='linux')),\n # 2013 Mac Pro running a 6-core Xeon.\n ('mac-x86-worker-1', WorkerConfig(max_builds=2, j=8, arch='x86', bits=[64], os='osx')),\n # Mac Mini 2018, 3.2 GHz 6-Core Intel Core i7, 16GB memory\n ('mac-x86-worker-2', WorkerConfig(max_builds=2, j=8, arch='x86', bits=[64], os='osx')),\n # Mac Mini 2018, ??? 
details TBD\n ('mac-x86-worker-3', WorkerConfig(max_builds=2, j=8, arch='x86', bits=[64], os='osx')),\n ('mac-arm-worker-1', WorkerConfig(max_builds=2, j=8, arch='arm', bits=[64], os='osx')),\n # The arm-linux bots here have 4 cores but apparently don't have enough RAM to do more\n # than -j=2 without crashing during LLVM builds.\n ('arm32-linux-worker-1', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[32], os='linux')),\n ('arm32-linux-worker-2', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[32], os='linux')),\n ('arm64-linux-worker-1', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[64], os='linux')),\n ('arm64-linux-worker-2', WorkerConfig(max_builds=1, j=2, arch='arm', bits=[64], os='linux')),\n # The rpi4 has 8GB ram, so apparently it's OK with -j=nproc for now.\n ('rpi4-linux-worker-1', WorkerConfig(max_builds=1, j=_NPROC, arch='arm', bits=[32], os='linux')),\n # TODO: should normally be offline because every D3D12 test fails\n ('win-worker-2', WorkerConfig(max_builds=1, j=_NPROC_PLUS_2, arch='x86', bits=[32, 64], os='windows')),\n # TODO: broken, pending repair till Monday\n # ('win-worker-3', WorkerConfig(max_builds=2, j=_NPROC_PLUS_2, arch='x86', bits=[32, 64], os='windows')),\n]\n\n# The 'workers' list defines the set of recognized buildworkers. Each element is\n# a Worker object, specifying a unique worker name and password. The same\n# worker name and password must be configured on the worker.\nc['workers'] = [Worker(n,\n WORKER_SECRET,\n keepalive_interval=300, # default is 3600 (1 hour). We'll do 5 mins.\n max_builds=cfg.max_builds,\n properties={'WORKER_BUILD_PARALLELISM': cfg.j}) for n, cfg in _WORKERS]\n\n_SANITIZERS = [\n 'asan',\n 'fuzzer', # this isn't *technically* a sanitizer, but is close enough that it's a good fit\n]\n\n# LOCKS\n\n# Performance testing requires exclusive use of a worker\n\n# Compute-intensive build steps will grab this lock in reader\n# mode. The performance test will grab it in exclusive mode.\nperformance_lock = WorkerLock(\"performance_lock\", maxCount=9999)\n\n# When building the LLVM nightlies, we can sync & build LLVM independently\n# from other work, but when we update the install directory, we need to ensure\n# we have an exclusive lock across the entire worker. (Since we have a small\n# number of LLVM versions, and since 'make install' doesn't take very long,\n# we could probably just get by with a single lock for *any* llvm install,\n# but this isn't much harder to do.)\nllvm_build_locks = {}\nfor llvm_branch, info in LLVM_BRANCHES.items():\n for bits in [32, 64]:\n llvm_build_locks[llvm_branch + str(bits)] = WorkerLock(\n f'llvm_install_lock_{info.version.major}_{bits}', maxCount=9999)\n\n# CHANGESOURCES\n\n# Here we point the buildbot at third-party codebases, ie. 
dependencies.\n# Currently, we only have LLVM's `main` branch configured.\n\nc['change_source'] = [\n GitPoller(\n repourl='https://github.com/llvm/llvm-project.git',\n workdir='gitpoller-llvm-workdir',\n branch=LLVM_BRANCHES[LLVM_MAIN].ref,\n pollInterval=60 * 60 * 24, # Only check llvm once every 24 hours\n pollAtLaunch=True)\n]\n\n# CODEBASES\n\nall_repositories = {\n 'https://github.com/halide/Halide.git': 'halide',\n 'https://github.com/llvm/llvm-project.git': 'llvm',\n}\n\n\ndef codebase_generator(chdict):\n repo = chdict['repository']\n assert repo in all_repositories, \"Codebase not found for chdict: \" + str(chdict)\n return all_repositories[repo]\n\n\nc['codebaseGenerator'] = codebase_generator\n\n\n# BUILDERS\n\n# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:\n# what steps, and which workers can execute them. Note that any particular build will\n# only take place on one worker.\n\n\nclass Purpose(Enum):\n halide_nightly = 1\n halide_testbranch = 2\n llvm_nightly = 3\n\n\nclass BuildSystem(Enum):\n make = 0\n cmake = 1\n\n\nclass BuilderType:\n \"\"\"A class to encapsulate the settings for a specific Builder.\n (Do not confuse with CMake's 'BUILD_TYPE', which is something else.)\n\n It includes:\n - Halide 'target' in the form of arch-bits-os\n - LLVM branch to be used\n - CMake vs Make\n - halide-nightly vs halide-testbranch vs llvm-nightly\n - sanitizers vs none\n\n It doesn't currently include any 'features' because we don't currently\n bake any in at build time.\n\n It doesn't currently include the C++ compiler used (eg gcc7 vs gcc8 vs clang),\n mainly because we currently never test with multiple compilers for a given\n setup. (If we ever need to do so, compiler should be added to this.)\n \"\"\"\n\n def __init__(self, arch, bits, os, halide_branch, llvm_branch, purpose, sanitizer=None,\n buildsystem=BuildSystem.cmake):\n assert arch in ['arm', 'x86']\n assert bits in [32, 64]\n assert os in ['linux', 'windows', 'osx']\n assert llvm_branch in LLVM_BRANCHES, f'{llvm_branch} not recognized'\n\n self.arch = arch\n self.bits = bits\n self.os = os\n self.halide_branch = halide_branch\n self.llvm_branch = llvm_branch\n self.buildsystem = buildsystem\n self.purpose = purpose\n self.sanitizer = sanitizer\n\n if self.halide_branch:\n assert self.purpose != Purpose.llvm_nightly\n assert self.halide_branch in HALIDE_BRANCHES, f'unknown branch {self.halide_branch}'\n assert (self.purpose == Purpose.halide_testbranch or # if not testbranch...\n self.llvm_branch in LLVM_FOR_HALIDE[self.halide_branch])\n else:\n assert self.purpose == Purpose.llvm_nightly\n\n if self.sanitizer:\n assert self.sanitizer in _SANITIZERS\n\n # The armbots aren't configured with Python at all.\n # We don't support the Python bindings on 32-bit at all.\n def handles_python(self):\n if self.bits == 32:\n return False\n\n if self.arch == 'arm' and self.os == 'linux':\n return False\n\n if self.sanitizer_preset() is not None:\n return False\n\n return True\n\n def handles_sanitizers(self):\n if self.buildsystem != BuildSystem.cmake:\n return False\n\n return (self.arch == 'x86'\n and self.bits == 64\n and self.os == 'linux'\n and self.llvm_branch == LLVM_MAIN)\n\n def sanitizer_preset(self):\n if self.handles_sanitizers():\n if self.sanitizer == 'asan':\n return 'linux-x64-asan'\n if self.sanitizer == 'fuzzer':\n return 'linux-x64-fuzzer'\n\n return None\n\n def handles_riscv(self):\n # Only support RISCV on LLVM16 or later.\n return self.llvm_branch not in 
[LLVM_RELEASE_15]\n\n def handles_hexagon(self):\n return (self.arch == 'x86'\n and self.bits == 64\n and self.os == 'linux'\n and self.llvm_branch == LLVM_MAIN)\n\n def handles_wasm(self):\n is_linux_x64 = (self.arch == 'x86'\n and self.bits == 64\n and self.os == 'linux')\n\n return (self.llvm_branch == LLVM_MAIN\n and (is_linux_x64 or self.os == 'osx'))\n\n def handles_wasm_wabt(self):\n return self.handles_wasm()\n\n def handles_wasm_v8(self):\n # OSX machines don't have V8 installed\n return self.handles_wasm() and self.os == 'linux'\n\n def has_nvidia(self):\n return (self.arch == 'x86'\n and self.bits == 64\n and self.os in ['windows', 'linux'])\n\n def handles_vulkan(self):\n # TODO: disabled temporarily pending fixes to the Vulkan runtime\n return False\n\n # Stick with Linux on x86-64 for now. Others TBD.\n # return (self.arch == 'x86'\n # and self.bits == 64\n # and self.os == 'linux'\n # and self.halide_branch in [HALIDE_MAIN, HALIDE_RELEASE_16])\n\n def handles_webgpu(self):\n # At the moment, the WebGPU team recommends the OSX versions of Dawn/Node\n # as the most robust for testing, so that's all we're set up to test with.\n # (Note that 'Dawn' must be built/installed on the test machines manually;\n # there are no binaries/prebuilts available at this time.)\n return self.os == 'osx' and self.halide_branch not in [HALIDE_RELEASE_15]\n\n def has_tflite(self):\n if self.arch == 'x86' and self.bits == 64 and self.os == 'linux':\n return True\n if self.arch == 'arm' and self.bits == 64 and self.os == 'osx':\n return True\n return False\n\n def has_ccache(self):\n return self.os in ['osx', 'linux']\n\n def halide_target(self):\n return '%s-%d-%s' % (self.arch, self.bits, self.os)\n\n def llvm_builder_label(self):\n return 'llvm-%s-%s' % (LLVM_BRANCHES[self.llvm_branch].version.major, self.halide_target())\n\n def halide_builder_label(self):\n # This currently tries to (somewhat) mimic the existing label pattern,\n # but is arbitrary. 
(If changed, manual purging of buildbot temporaries\n # is appropriate)\n a = ['halide']\n if self.sanitizer:\n a.append(self.sanitizer)\n if self.purpose == Purpose.halide_testbranch:\n a.append('testbranch')\n elif self.purpose == Purpose.halide_nightly:\n a.append('nightly')\n a.append(self.halide_branch)\n if self.halide_branch == HALIDE_MAIN:\n # Halide master is built against multiple LLVM versions,\n # so append that here for clarity\n a.append(f'llvm{LLVM_BRANCHES[self.llvm_branch].version.major}')\n a.append(self.halide_target())\n a.append(self.buildsystem.name)\n return '-'.join(a)\n\n def builder_label(self):\n if self.purpose == Purpose.llvm_nightly:\n return self.llvm_builder_label()\n else:\n return self.halide_builder_label()\n\n def builder_tags(self):\n return self.builder_label().split('-')\n\n def get_worker_names(self):\n return [n for n, cfg in _WORKERS\n if self.arch == cfg.arch and self.bits in cfg.bits and self.os == cfg.os]\n\n def __str__(self):\n return self.halide_target()\n\n\ndef get_builddir_subpath(subpath):\n # Normalize paths to use forward slashes.\n return Transform(lambda x: x.replace('\\\\', '/'), Interpolate(f'%(prop:builddir)s/{subpath}'))\n\n\ndef get_llvm_toolchains_path(*subpaths):\n return get_builddir_subpath(os.path.join('llvm-toolchains', *subpaths))\n\n\n# TODO: make private to the LLVM code\ndef get_llvm_source_path(*subpaths):\n return get_builddir_subpath(os.path.join('llvm-project', *subpaths))\n\n\n# TODO: make private to the LLVM code\ndef get_llvm_build_path(*subpaths):\n return get_builddir_subpath(os.path.join('llvm-build', *subpaths))\n\n\ndef get_llvm_install_path(builder_type, *subpaths):\n # Note that `builder_type.purpose` can be a Halide builder or an LLVM builder;\n # we want to ignore that aspect and produce the same effective path\n # regardless (ie, based only on the other aspects of the builder_type).\n llvm_workdir = builder_type.llvm_builder_label()\n return get_builddir_subpath(os.path.join('..', llvm_workdir, 'llvm-install', *subpaths))\n\n\ndef get_halide_source_path(*subpaths):\n return get_builddir_subpath(os.path.join('halide-source', *subpaths))\n\n\ndef get_halide_build_path(*subpaths):\n return get_builddir_subpath(os.path.join('halide-build', *subpaths))\n\n\ndef get_halide_install_path(builder_type, *subpaths):\n s = 'halide-install'\n if builder_type.sanitizer:\n s += '-' + builder_type.sanitizer\n return get_builddir_subpath(os.path.join(s, *subpaths))\n\n\ndef add_get_halide_source_steps(factory, builder_type):\n factory.addStep(GitHub(name='Get Halide source',\n locks=[performance_lock.access('counting')],\n codebase='halide',\n workdir=get_halide_source_path(),\n repourl='https://github.com/halide/Halide.git',\n branch=HALIDE_BRANCHES[builder_type.halide_branch].ref,\n mode='incremental'))\n\n\ndef add_get_llvm_source_steps(factory, builder_type):\n factory.addStep(Git(name=f'Get LLVM {LLVM_BRANCHES[builder_type.llvm_branch].version.major}',\n locks=[performance_lock.access('counting')],\n codebase='llvm',\n workdir=get_llvm_source_path(),\n repourl='https://github.com/llvm/llvm-project.git',\n branch=LLVM_BRANCHES[builder_type.llvm_branch].ref,\n mode='incremental'))\n\n # Always download the toolchains, even on platforms we don't need 'em\n toolchains_dir = get_llvm_toolchains_path()\n factory.addStep(MakeDirectory(name=\"Make CMake toolchain directory\",\n locks=[performance_lock.access('counting')],\n dir=toolchains_dir,\n haltOnFailure=False))\n factory.addStep(FileDownload(name='Download CMake 
toolchains',\n mastersrc='toolchain.linux-arm32.cmake', # relative to base dir\n workerdest='toolchain.linux-arm32.cmake', # relative to workdir\n workdir=toolchains_dir,\n locks=[performance_lock.access('counting')],\n haltOnFailure=True,\n mode=0o644))\n\n\n# Determined by running `set` in cmd.exe before and after vcvarsall.bat\n# and diffing the output. It's likely that we don't need all of these\n# to make things work, but I haven't bothered to figure out what is irrelevant,\n# so I'm erring on the side of maybe too much.\n# noinspection SpellCheckingInspection\nVCVARSALL_ENV_VARS = [\n \"COMMANDPROMPTTYPE\",\n \"DEVENVDIR\",\n \"EXTENSIONSDKDIR\",\n \"FRAMEWORK40VERSION\",\n \"FRAMEWORKDIR\",\n \"FRAMEWORKDIR64\",\n \"FRAMEWORKVERSION\",\n \"FRAMEWORKVERSION64\",\n \"INCLUDE\",\n \"LIB\",\n \"LIBPATH\",\n \"NETFXSDKDIR\",\n \"PATH\",\n \"PLATFORM\",\n \"UCRTVERSION\",\n \"UNIVERSALCRTSDKDIR\",\n \"VCIDEINSTALLDIR\",\n \"VCINSTALLDIR\",\n \"VCTOOLSINSTALLDIR\",\n \"VCTOOLSREDISTDIR\",\n \"VCTOOLSVERSION\",\n \"VISUALSTUDIOVERSION\",\n \"VS110COMNTOOLS\",\n \"VS120COMNTOOLS\",\n \"VS140COMNTOOLS\",\n \"VS160COMNTOOLS\",\n \"VSCMD_ARG_APP_PLAT\",\n \"VSCMD_ARG_HOST_ARCH\",\n \"VSCMD_ARG_TGT_ARCH\",\n \"VSCMD_VER\",\n \"VSINSTALLDIR\",\n \"WINDOWSLIBPATH\",\n \"WINDOWSSDKBINPATH\",\n \"WINDOWSSDKDIR\",\n \"WINDOWSSDKLIBVERSION\",\n \"WINDOWSSDKVERBINPATH\",\n \"WINDOWSSDKVERSION\",\n \"WINDOWSSDK_EXECUTABLEPATH_X64\",\n \"WINDOWSSDK_EXECUTABLEPATH_X86\",\n \"__DOTNET_ADD_64BIT\",\n \"__DOTNET_PREFERRED_BITNESS\",\n \"__VSCMD_PREINIT_PATH\",\n \"__VSCMD_SCRIPT_ERR_COUNT\",\n]\n\n\ndef get_msvc_config_steps(factory, builder_type):\n # ensure that we use the x64 host compiler, not the x86 host compiler\n arch_for_bits = {32: 'x64_x86', 64: 'x64'}\n vcvarsall = 'vcvarsall.bat %s && set' % arch_for_bits[builder_type.bits]\n\n # TODO: surely there is a better way of locating vcvarsall\n # vcvarsdir = \"c:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Auxiliary/Build\"\n vcvarsdir = \"C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Auxiliary/Build\"\n\n # `vsvarsall && set` dumps all the settings to stdout;\n # we'll extract & save just the subset we think are likely to be relevant.\n def save_interesting_env_vars(rc, stdout, stderr):\n d = {}\n for line in stdout.split('\\n'):\n match = re.match(\"^([a-zA-Z0-9_-]+)=(.*)$\", line.strip())\n if match:\n key = match.group(1).upper()\n value = match.group(2)\n if key in VCVARSALL_ENV_VARS:\n d[key] = value\n return {'env': d}\n\n factory.addStep(\n SetPropertyFromCommand(name='Run VcVarsAll',\n description='Run VcVarsAll',\n workdir=vcvarsdir,\n locks=[performance_lock.access('counting')],\n haltOnFailure=True,\n command=vcvarsall,\n extract_fn=save_interesting_env_vars))\n\n\ndef merge_renderable(_base, _extn):\n @renderer\n @defer.inlineCallbacks\n def render(props):\n base = yield props.render(_base)\n extn = yield props.render(_extn)\n base.update(extn)\n return base\n\n return render\n\n\ndef get_distrib_name(_version, target, ext):\n @renderer\n @defer.inlineCallbacks\n def render(props):\n rev = props.getProperty('got_revision')['halide']\n version = yield props.render(_version)\n return os.path.join(ARTIFACTS_DIR, f'Halide-{version}-{target}-{rev}.{ext}')\n\n return render\n\n\ndef get_cmake_generator(builder_type):\n return 'Ninja'\n\n\ndef get_llvm_cmake_options(builder_type):\n options = []\n return options\n\n\ndef get_halide_cmake_options(builder_type, build_dir):\n options = []\n\n if builder_type.sanitizer 
and builder_type.purpose != Purpose.llvm_nightly:\n assert builder_type.handles_sanitizers()\n options.append(\"--preset=%s\" % builder_type.sanitizer_preset())\n # append *after* preset so we override the build dir\n options += ['-B', build_dir]\n\n return options\n\n\ndef get_ctest_options(builder_type, build_dir):\n assert builder_type.purpose != Purpose.llvm_nightly\n\n if builder_type.sanitizer:\n assert builder_type.handles_sanitizers()\n # No, this won't work, see https://gitlab.kitware.com/cmake/cmake/-/issues/23982 --\n # fortunately, we don't need to specify the current sanitizer toolchains\n # at test time (just at configure time).\n # return {'preset': builder_type.sanitizer_preset(), 'test_dir': build_dir}\n return {'build_config': builder_type.sanitizer_preset()}\n else:\n return {'build_config': 'Release'}\n\n\ndef get_halide_cmake_definitions(builder_type, halide_target='host', wasm_jit='wabt'):\n cmake_definitions = {\n 'Clang_DIR': get_llvm_install_path(builder_type, 'lib/cmake/clang'),\n 'CMAKE_INSTALL_PREFIX': get_halide_install_path(builder_type),\n 'Halide_TARGET': halide_target,\n 'LLD_DIR': get_llvm_install_path(builder_type, 'lib/cmake/lld'),\n 'LLVM_DIR': get_llvm_install_path(builder_type, 'lib/cmake/llvm'),\n 'LLVM_ROOT': get_llvm_install_path(builder_type),\n 'WITH_PYTHON_BINDINGS': 'ON' if builder_type.handles_python() else 'OFF',\n 'WITH_TEST_FUZZ': 'ON' if builder_type.sanitizer == 'fuzzer' else 'OFF'\n }\n\n if builder_type.sanitizer and builder_type.handles_sanitizers():\n pass\n else:\n cmake_definitions['CMAKE_BUILD_TYPE'] = 'Release'\n\n # Sanitizer builds intermittently fail when using CCache for reasons that aren't\n # clear (\"precompiled header modified\") -- for now, just ignore CCache for them\n if builder_type.has_ccache() and not builder_type.sanitizer_preset():\n cmake_definitions['Halide_CCACHE_BUILD'] = 'ON'\n\n if builder_type.arch == 'arm' and builder_type.bits == 32 and builder_type.os == 'linux':\n # Halide always uses its own toolchain files, from the cmake/ subdir.\n cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = get_halide_source_path('cmake', 'toolchain.linux-arm32.cmake')\n\n if builder_type.os == 'windows':\n cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate('%(prop:VCPKG_ROOT)s/scripts/buildsystems/vcpkg.cmake')\n # CMake on Windows can't reliably find our pip-installed PyBind11 unless we set CMAKE_PREFIX_PATH to point to is\n cmake_definitions['pybind11_DIR'] = Interpolate('%(prop:VIRTUAL_ENV)s/share/cmake/pybind11')\n\n # Don't bother with anything Python-related if we are targeting WebAssembly.\n if \"wasm\" in halide_target:\n cmake_definitions['WITH_PYTHON_BINDINGS'] = 'OFF'\n\n # TODO: HALIDE_NODE_JS_PATH is only necessary until EMSDK updates their built-in version of Node\n # to v16.13+; when that is done, remove this definition.\n if builder_type.handles_wasm() and halide_target.startswith(\"wasm-\"):\n cmake_definitions['CMAKE_TOOLCHAIN_FILE'] = Interpolate(\n '%(prop:EMSDK)s/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake')\n cmake_definitions['NODE_JS_EXECUTABLE'] = Property('HALIDE_NODE_JS_PATH')\n if wasm_jit == 'v8':\n cmake_definitions['WITH_WABT'] = 'OFF'\n cmake_definitions['WITH_V8'] = 'ON'\n cmake_definitions['V8_INCLUDE_PATH'] = '/home/halidenightly/v8/v8/include'\n cmake_definitions['V8_LIB_PATH'] = \\\n '/home/halidenightly/v8/v8/out/x64.release.static/obj/libv8_monolith.a'\n elif wasm_jit == 'wabt':\n cmake_definitions['WITH_WABT'] = 'ON'\n cmake_definitions['WITH_V8'] = 'OFF'\n 
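# Blank the V8 paths explicitly: the test factory reconfigures the same\n            # build tree for each target, so this keeps a WABT configure from\n            # inheriting V8 paths cached by an earlier V8 configure.\n            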
__author__ = 'cromox'
from time import sleep
import inspect
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from Forex_CFD.features.main_page import FxMainPage

class FxBuySell(FxMainPage):

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    def buy(self, amount):
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        # find_element_by_xpath raises NoSuchElementException when nothing
        # matches, so query with find_elements_by_xpath instead (an empty
        # list is falsy), which makes these branches safe to take.
        amount_inputs = self.driver.find_elements_by_xpath("//div[@class='visible-input']//input[contains(@id, 'uniqName')]")
        if amount_inputs:
            # element = WebDriverWait(driver, 5).until(EC.visibility_of_element_located(
            #     (By.XPATH, "//div[@class='visible-input']//input[contains(@id, 'uniqName')]")))
            element = amount_inputs[0]
            element.clear()
            # Type the amount one character at a time so the widget's input
            # handlers keep up with the keystrokes
            for character in str(amount):
                element.send_keys(character)
                sleep(0.5)
            # Confirm Button
            confirm_buttons = self.driver.find_elements_by_xpath("//div[contains(@class,'confirm-button')]")
            if confirm_buttons:
                confirm_buttons[0].click()
            elif self.driver.find_elements_by_xpath("//*[contains(text(),'Market closed')]"):
                print('Market closed')
                self.driver.find_elements_by_xpath("//*[@class='header']//*[@class='close-icon']")[0].click()

    def sell(self, amount):
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        # Switching to sell
        self.driver.find_elements_by_xpath("//div[@data-dojo-attach-event='click: setDirectionSell']")[0].click()
        # From there on it's exactly like the buy
        self.buy(amount)

    def script_click_xpath(self, xpath):
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        # Click the node through JavaScript; some of these elements can't be
        # clicked via Selenium directly
        self.driver.execute_script(f"document.evaluate(\"{xpath}\", document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.click()")

    def open_stock_dialog(self, stock):
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        WebDriverWait(self.driver, 5).until(EC.visibility_of_any_elements_located((By.XPATH, "//span[contains(@data-dojo-attach-event, 'onOpenDialogClick')]")))
        elem = self.driver.find_elements_by_xpath("//span[contains(@data-dojo-attach-event, 'onOpenDialogClick')]")
        # The dialog opener appears twice in the DOM; try both elements
        try:
            elem[0].click()
        except Exception:
            elem[1].click()
        # Search the stock
        elem = self.driver.find_element_by_xpath("//input[@placeholder=\"Instrument search\"]")
        # Raise the max length to 100 so long stock names can be entered in full
        self.driver.execute_script("arguments[0].setAttribute('maxlength',arguments[1])", elem, 100)
        elem.send_keys(stock)
        # Open its dialog with JS. Selenium couldn't open the dialog itself.
        self.script_click_xpath(f"//*[@id='list-results-instruments']//span[contains(@class, 'instrument-name') and .='{stock}']")
        sleep(1)

    def buy_stock(self, stock, amount):
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        self.open_stock_dialog(stock)
        self.buy(amount)
        sleep(0.5)

    def sell_stock(self, stock, amount):
        self.log.info("--> " + inspect.stack()[0][3] + " started")
        # It's just opening a stock and selling it
        self.open_stock_dialog(stock)
        self.sell(amount)
        sleep(0.5)
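
# A minimal usage sketch (an assumption, not part of the original page object):
# it presumes an already-authenticated Selenium 3 session sitting on the
# trading dashboard; the ChromeDriver setup and the 'AAPL' instrument name
# are illustrative only.
#
#     from selenium import webdriver
#
#     driver = webdriver.Chrome()
#     # ... log in and navigate to the trading dashboard first ...
#     trader = FxBuySell(driver)
#     trader.buy_stock('AAPL', 100)   # opens the instrument dialog, then buys
#     trader.sell_stock('AAPL', 50)   # same dialog flow, sell direction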
"""
USERS MODEL
"""
from www import app
import mongoengine
import datetime

class User(mongoengine.Document):
    username = mongoengine.StringField(required=True)
    password = mongoengine.StringField(required=True)
    email = mongoengine.StringField(required=True)

    active_hash = mongoengine.StringField(required=False, default=None)
    active_hash_expires = mongoengine.DateTimeField(required=False,
                                                    default=None)

    recover_hash = mongoengine.StringField(required=False)
    recover_hash_expires = mongoengine.DateTimeField(required=False)

    active = mongoengine.BooleanField(required=True, default=False)
    locked = mongoengine.BooleanField(required=True, default=True)  # locked changes depending on user active or not

    first_name = mongoengine.StringField(required=False)
    last_name = mongoengine.StringField(required=False)
    show_as = mongoengine.StringField(required=False)
    date_of_birth = mongoengine.DateTimeField(required=False)

    # Pass the callable itself (no parentheses) so the timestamp is evaluated
    # per document at save time, not once at class-definition time.
    created_at = mongoengine.DateTimeField(required=True, default=datetime.datetime.utcnow)
    updated_at = mongoengine.DateTimeField(required=False, default=datetime.datetime.utcnow)

    meta = {
        'db_alias': app.config["DEFAULT_DATABASE_ALIAS"],
        'collection': 'users',
    }

    @classmethod
    def pre_save(cls, sender, document, **kwargs):
        document.updated_at = datetime.datetime.utcnow()


mongoengine.signals.pre_save.connect(User.pre_save, sender=User)
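
# A minimal usage sketch (assumption: a mongoengine connection has been
# registered under the alias in app.config["DEFAULT_DATABASE_ALIAS"]); the
# field values below are illustrative only.
#
#     user = User(username='jdoe', password='<hashed-password>',
#                 email='jdoe@example.com')
#     user.save()          # pre_save fires first and refreshes updated_at
#     assert user.locked   # new accounts start locked until activated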
# mathematical operators
'''
* multiply
/ divide (normal)
// divide (integer)
% modulus (remainder)
+ add
- subtract
** exponent (raise to)
'''
print(2 * 3)
# comparison operators
'''
== equal to
!= not equal to
> greater than
< less than
>= greater or equal to
<= less or equal to
'''
a = int(input("Enter your age: "))
b = 18
if a >= b:
    print("You can drive the car, you are", a)
else:
    print("Sorry, you are too young")
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models import Max
from django.core.validators import RegexValidator
from django.utils import timezone
class User(AbstractUser):
is_developer = models.BooleanField('developer status', default=False)
is_marketing = models.BooleanField('marketing status', default=False)
email = models.EmailField(unique=True, null=True, blank=True)
def __str__(self):
return self.username
class Application(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
app_code = models.CharField(max_length=30, blank=True, null=True)
name = models.CharField(max_length=100, blank=True, null=True)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Application.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "APP" + "{0:03d}".format(max)
        super().save(**kwargs)
class Page(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
application = models.ForeignKey(Application, on_delete=models.CASCADE, related_name='applications')
name = models.CharField(max_length=100)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Page.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "PG" + "{0:03d}".format(max)
        super().save(**kwargs)
class Location(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
loc_code = models.CharField(max_length=30, null=True, blank=True, unique=True)
page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='pages')
is_slider = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
name = models.CharField(max_length=100)
width = models.IntegerField()
height = models.IntegerField()
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Location.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "LOC" + "{0:03d}".format(max)
        super().save(**kwargs)
class Banner(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=5)
name = models.CharField(max_length=100)
caption = models.TextField()
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to='images/', verbose_name='Banner', blank=True)
height = models.IntegerField()
width = models.IntegerField()
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.name
def delete(self, *args, **kwargs):
self.image.delete(save=False)
super(Banner, self).delete(*args, **kwargs)
def save(self, **kwargs):
if not self.id:
max = Banner.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "BN" + "{0:03d}".format(max)
        super().save(**kwargs)
class Campaign(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='locations')
campaign_code = models.CharField(max_length=30, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
date_created = models.DateField(null=True, blank=True)
date_updated = models.DateField(null=True, blank=True)
valid_date_start = models.DateField(null=True, blank=True)
valid_date_end = models.DateField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CMP" + "{0:03d}".format(max)
        super().save(**kwargs)
class Installation(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
banner = models.ForeignKey(Banner, on_delete=models.CASCADE, related_name='banners', blank=True, null=True)
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE, related_name='campaigns')
redirect = models.URLField(null=True, blank=True)
def save(self, **kwargs):
if not self.id:
max = Installation.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "INS" + "{0:03d}".format(max)
        super().save(**kwargs)
source_choices = (
('random', 'Generate nomor secara acak'),
('csv', 'Upload file .csv'),
)
class ContactSource(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
source = models.CharField(max_length=30, choices=source_choices)
def __str__(self):
return self.source
def save(self, **kwargs):
if not self.id:
max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CONSRC" + "{0:03d}".format(max)
        super().save(**kwargs)
class Contact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
source = models.ForeignKey(ContactSource, on_delete=models.CASCADE, related_name='contactsources')
name = models.CharField(max_length=100)
numbers = models.FileField(upload_to='pickles/contact/')
is_deleted = models.BooleanField(default=False)
deleted_datetime = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, **kwargs):
if not self.id:
max = Contact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CON" + "{0:03d}".format(max)
        super().save(**kwargs)
class GenerateContact(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact')
first_code = models.CharField(max_length=4, validators=[RegexValidator(r'^\d{0,10}$')])
digits = models.CharField(max_length=30, validators=[RegexValidator(r'^\d{0,10}$')])
generate_numbers = models.CharField(max_length=30, validators=[RegexValidator(r'^\d{0,10}$')])
def save(self, **kwargs):
if not self.id:
max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "GENCON" + "{0:03d}".format(max)
        super().save(**kwargs)
status_choices = (
('complete', 'Sudah Dikirim'),
('uncomplete', 'Belum Dikirim'),
)
class SMSBlast(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=6)
message_title = models.CharField(max_length=100)
message_text = models.CharField(max_length=160)
send_date = models.DateField(null=True, blank=True)
send_time = models.TimeField(null=True, blank=True)
is_now = models.BooleanField(default=False)
def __str__(self):
return self.message_title
def save(self, **kwargs):
if not self.id:
max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "SMS" + "{0:03d}".format(max)
        super().save(**kwargs)
class ContactAndSMS(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=12)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='smsncon_contact')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsncon_smsblast')
def save(self, **kwargs):
if not self.id:
max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "CONANDSMS" + "{0:03d}".format(max)
        super().save(**kwargs)
class SMSBlastJob(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=9)
job_id = models.CharField(max_length=100, blank=True, null=True)
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_job')
smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsblast_job')
def __str__(self):
return self.job_id
def save(self, **kwargs):
if not self.id:
max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "SMSJOB" + "{0:03d}".format(max)
        super().save(**kwargs)
class SMSStatus(models.Model):
id = models.CharField(primary_key=True, editable=False, max_length=10)
job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE, related_name='job_status')
contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_status')
status = models.FileField(upload_to='pickles/status/')
def __str__(self):
        # SMSStatus has no job_id field of its own; delegate to the related job
        return self.job.job_id
def save(self, **kwargs):
if not self.id:
max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']
if max is not None:
max = max[-3:]
max = int(max)
max += 1
else:
max = 1
self.id = "SMSSTAT" + "{0:03d}".format(max)
        super().save(**kwargs)
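
# Every save() above follows the same sequential-ID recipe: take the numeric
# tail of the current maximum ID, increment it, and zero-pad to three digits.
# A hypothetical standalone sketch of that recipe (next_id is an illustrative
# name, not part of the models):
def next_id(prefix, current_max):
    # current_max is the highest existing ID, e.g. "APP041", or None
    if current_max is None:
        n = 1
    else:
        n = int(current_max[-3:]) + 1
    return prefix + "{0:03d}".format(n)

print(next_id("APP", None))      # APP001
print(next_id("APP", "APP041"))  # APP042
# Note the built-in ceiling: "{0:03d}" and the [-3:] slice assume at most
# 999 rows per model; past that, the counter wraps and IDs collide.
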
import hashlib
import os
Monitoredlist = []
list_create = []
list_compare = []
def Create_list():
i = 0
for file in os.listdir(os.getcwd()):
if file.endswith('openplc'):
Monitoredlist.append(file)
i += 1
if i == 0:
print('No Files are being monitored!')
else:
print('The files being monitored are as follows')
print(Monitoredlist)
def Hasher():
BLOCKSIZE = 65536
hasher = hashlib.sha1()
del list_create[:]
for i in range(len(Monitoredlist)):
list_create.append(Monitoredlist[i])
with open(Monitoredlist[i], 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
list_create.append(hasher.hexdigest())
def Create_record():
progpath = os.getcwd()
dirpath = progpath + '/Details'
if not os.path.exists(dirpath):
os.makedirs(dirpath)
os.chdir(dirpath)
file = open('Record.txt', 'w')
for item in list_create:
file.write('%s\n' % item)
file.close()
os.chdir(progpath)
def Read_hash():
progpath = os.getcwd()
dirpath = progpath + '/Details'
os.chdir(dirpath)
file = open('Record.txt', 'r')
list_compare = []
list_compare = file.readlines()
list_compare = [x[:-1] for x in list_compare]
os.chdir(progpath)
if list_compare == list_create:
Response(0)
else:
Response(1)
def Response(flag):
if flag == 1:
print('Ladder Logic Tampered')
else:
print('Ladder Logic is Secure')
def main():
Create_list()
Hasher()
print(list_create)
Create_record()
Read_hash()
while 1:
Hasher()
Read_hash()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# --------------------------------------------------------------------------------------------------
# Property of UAH
# IDS module for ladder logic monitoring
# This code was written by Rishabh Das
# Date:- 18th June 2018
# --------------------------------------------------------------------------------------------------
import hashlib
import os
# ---------------------------------------------------------------------------------------------------
# This section declares the Global variables of the project
# ---------------------------------------------------------------------------------------------------
Monitoredlist=[]
list_create=[]
list_compare=[]
# ---------------------------------------------------------------------------------------------------
# This section counts the files in the directory and creates the list of the files that need
# to be monitored
# ---------------------------------------------------------------------------------------------------
def Create_list():
i=0
for file in os.listdir(os.getcwd()):
if file.endswith("openplc"):
Monitoredlist.append(file)
i += 1
if i==0:
print("No Files are being monitored!")
else:
print("The files being monitored are as follows")
print(Monitoredlist)
# ---------------------------------------------------------------------------------------------------
# This is the Hasher module that creates the hash for the files and maintains a table of the file
# hashes
# ---------------------------------------------------------------------------------------------------
def Hasher():
    BLOCKSIZE = 65536
    hasher = hashlib.sha1()  # note: one hasher object is reused across files,
                             # so each stored digest is cumulative over all
                             # files hashed so far; Create_record() and
                             # Read_hash() compare like-for-like, so change
                             # detection still works
del list_create[:]
for i in range(len(Monitoredlist)):
list_create.append(Monitoredlist[i])
with open(Monitoredlist[i], 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
list_create.append(hasher.hexdigest())
#print(list_create)
# --------------------------------------------------------------------------------------------------
# This Function records the hash of the files being monitored to a text file. This should only be
# called when the program is being executed for the first time
# --------------------------------------------------------------------------------------------------
def Create_record():
progpath = os.getcwd()
dirpath = progpath + '/Details'
if not os.path.exists(dirpath):
os.makedirs(dirpath)
os.chdir(dirpath)
file = open('Record.txt',"w")
for item in list_create:
file.write("%s\n" % item)
file.close()
os.chdir(progpath)
# --------------------------------------------------------------------------------------------------
# This module parses the stored hashes and stores them into a fresh python list
# --------------------------------------------------------------------------------------------------
def Read_hash():
progpath = os.getcwd()
dirpath = progpath + '/Details'
os.chdir(dirpath)
    file = open('Record.txt', 'r')
    list_compare = file.readlines()
    list_compare = [x[:-1] for x in list_compare]
    file.close()
    os.chdir(progpath)
#print(list_compare)
#print(list_create)
if list_compare == list_create:
Response(0)
else:
Response(1)
# --------------------------------------------------------------------------------------------------
# Once the change is detected this module is used to respond to the threat
# flag ->>>> 1 Change is detected
# flag ->>>> 0 No change
# --------------------------------------------------------------------------------------------------
def Response(flag):
if flag==1:
print("Ladder Logic Tampered")
#Launch recovery routine
else:
print("Ladder Logic is Secure")
# --------------------------------------------------------------------------------------------------
# The main Function
# --------------------------------------------------------------------------------------------------
def main():
Create_list()
Hasher()
print(list_create)
Create_record()
    Read_hash()   # first comparison against the stored baseline
    while(1):
        Hasher()
        Read_hash()   # re-hash and re-compare continuously; a short
                      # time.sleep() here would avoid busy-waiting
# 1. Create the folder for storing the new file->Done
# 2. Module to compare the files with a new file->Done
# 3. Module to backup the ladder logics
# 4. Module to restore the ladder logic
# 5. Reporting unit->Done
# 6. Push code to GitHub->Done
if __name__ == "__main__": main()
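
# A minimal sketch (not part of the original IDS): as noted in Hasher(), one
# SHA-1 object is reused, so each stored digest is cumulative. If independent
# per-file digests were wanted instead, a variant would create a fresh hasher
# inside the loop, along these lines:
def hash_one_file(path, blocksize=65536):
    h = hashlib.sha1()  # fresh hasher per file
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(blocksize), b''):
            h.update(chunk)
    return h.hexdigest()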
from contextlib import contextmanager
from filecmp import cmp, dircmp
from shutil import copyfile, copytree, rmtree
import pytest
from demisto_sdk.commands.common.constants import PACKS_DIR, TEST_PLAYBOOKS_DIR
from demisto_sdk.commands.common.tools import src_root
TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'
TEST_PRIVATE_CONTENT_REPO = TEST_DATA / 'private_content_slim'
UNIT_TEST_DATA = (src_root() / 'commands' / 'create_artifacts' / 'tests' / 'data')
COMMON_SERVER = UNIT_TEST_DATA / 'common_server'
ARTIFACTS_EXPECTED_RESULTS = TEST_DATA / 'artifacts'
def same_folders(src1, src2):
    """Return True if the two folder trees contain the same file names.

    Note: dircmp's left_only/right_only compare directory listings, not
    file contents.
    """
    dcmp = dircmp(src1, src2)
    if dcmp.left_only or dcmp.right_only:
        return False
    for sub_dcmp in dcmp.subdirs.values():
        # propagate subfolder differences; an unchecked recursive call
        # would silently discard them
        if not same_folders(sub_dcmp.left, sub_dcmp.right):
            return False
    return True
@contextmanager
def destroy_by_ext(suffix: str):
"""Modify file content to invalid by file extention - json/yaml.
Open:
- Choose file by file extention.
- Modify file content to not valid.
Close:
- Modify content to the original state.
"""
if suffix == 'json':
file = TEST_CONTENT_REPO / "Packs" / "Sample01" / "Classifiers" / "classifier-sample_new.json"
else:
file = TEST_CONTENT_REPO / "Packs" / "Sample01" / "TestPlaybooks" / "playbook-sample_test1.yml"
old_data = file.read_text()
file.write_text("{123dfdsf,}\nfdsfdsf")
try:
yield
finally:
file.write_text(old_data)
@contextmanager
def duplicate_file():
"""Create duplicate file name in content repository.
Open:
- Create copy of file in content.
Close:
- Delete copied file.
"""
file = TEST_CONTENT_REPO / PACKS_DIR / "Sample01" / TEST_PLAYBOOKS_DIR / "playbook-sample_test1.yml"
new_file = TEST_CONTENT_REPO / PACKS_DIR / "Sample02" / TEST_PLAYBOOKS_DIR / "playbook-sample_test1.yml"
try:
copyfile(file, new_file)
yield
finally:
new_file.unlink()
@contextmanager
def temp_dir():
"""Create Temp direcotry for test.
Open:
- Create temp directory.
Close:
- Delete temp directory.
"""
temp = UNIT_TEST_DATA / 'temp'
try:
temp.mkdir(parents=True, exist_ok=True)
yield temp
finally:
rmtree(temp)
@pytest.fixture()
def mock_git(mocker):
"""Mock git Repo object"""
from demisto_sdk.commands.common.content import Content
# Mock git working directory
mocker.patch.object(Content, 'git')
Content.git().working_tree_dir = TEST_CONTENT_REPO
yield
@pytest.fixture()
def private_repo():
"""Create Temp private repo structure from original content structure.
Open:
- Create a copy of regular content.
- Delete - content/TestPlaybooks dir.
Close:
- Delete private content folder.
"""
try:
copytree(TEST_CONTENT_REPO, TEST_PRIVATE_CONTENT_REPO)
test_playbook_dir = TEST_PRIVATE_CONTENT_REPO / TEST_PLAYBOOKS_DIR
rmtree(test_playbook_dir)
yield TEST_PRIVATE_CONTENT_REPO
finally:
rmtree(TEST_PRIVATE_CONTENT_REPO)
def test_modify_common_server_constants():
""" Modify global variables in CommonServerPython.py
When: CommonServerPython.py contains:
- Global variable - CONTENT_RELEASE_VERSION = '0.0.0'
- Global variable - CONTENT_BRANCH_NAME = ''
Given: Parameters:
- Content version x.x.x
- Active branch - xxxx
Then: CommonServerPython.py changes:
- Global variable - CONTENT_RELEASE_VERSION = 'x.x.x'
- Global variable - CONTENT_BRANCH_NAME = 'xxxx'
Notes:
- After test clean up changes.
"""
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import \
modify_common_server_constants
path_before = COMMON_SERVER / 'CommonServerPython.py'
    path_expected = COMMON_SERVER / 'CommonServerPython_modified.py'
old_data = path_before.read_text()
modify_common_server_constants(path_before, '6.0.0', 'test')
    assert cmp(path_before, path_expected)
path_before.write_text(old_data)
def test_dump_pack(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, Pack, create_dirs, dump_pack)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
create_dirs(artifact_manager=config)
dump_pack(artifact_manager=config, pack=Pack(TEST_CONTENT_REPO / PACKS_DIR / 'Sample01'))
        assert same_folders(src1=temp / 'content_packs' / 'Sample01',
                            src2=ARTIFACTS_EXPECTED_RESULTS / 'content' / 'content_packs' / 'Sample01')
def test_create_content_artifacts(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 0
        assert same_folders(temp, ARTIFACTS_EXPECTED_RESULTS / 'content')
def test_create_private_content_artifacts(private_repo):
from demisto_sdk.commands.common.content import Content
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
config.content = Content(private_repo)
exit_code = create_content_artifacts(artifact_manager=config)
        assert same_folders(temp, ARTIFACTS_EXPECTED_RESULTS / 'private')
assert exit_code == 0
@pytest.mark.parametrize(argnames="suffix", argvalues=["yml", "json"])
def test_malformed_file_failure(suffix: str, mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
with destroy_by_ext(suffix):
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
def test_duplicate_file_failure(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
with duplicate_file():
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
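
# A minimal sketch (not part of the test suite) of what same_folders actually
# checks: dircmp's left_only/right_only compare directory *listings*, so two
# trees with identical names but different contents still compare equal.
def _demo_same_folders():
    from pathlib import Path
    from tempfile import TemporaryDirectory
    with TemporaryDirectory() as a, TemporaryDirectory() as b:
        (Path(a) / 'x.txt').write_text('1')
        (Path(b) / 'x.txt').write_text('2')
        assert same_folders(a, b)        # same names -> True, despite contents
        (Path(b) / 'extra.txt').write_text('3')
        assert not same_folders(a, b)    # right_only entry -> False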
from django.contrib import admin
# Register your models here.
from registration.models import FbAuth
class AllFieldsAdmin(admin.ModelAdmin):
"""
A model admin that displays all field in admin excpet Many to many and pk field
"""
def __init__(self, model, admin_site):
self.list_display = [field.name for field in model._meta.fields
if field.name not in ["id"]]
super(AllFieldsAdmin, self).__init__(model, admin_site)
admin.site.register(FbAuth, AllFieldsAdmin)  # register the model with the all-fields admin defined above
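
# A minimal sketch (not from the original file): because AllFieldsAdmin reads
# its model from the constructor, the same class can be reused for any model.
# `Profile` below is a hypothetical model, shown for illustration only.
# from registration.models import Profile
# admin.site.register(Profile, AllFieldsAdmin)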
from integral_image import calc_integral_image


class Region:
    """A rectangular region whose pixel sum is read from an integral image."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def calc_feature(self, cumul_sum):
        # standard four-corner lookup on the summed-area table:
        # sum = SAT(y2, x2) - SAT(y2, x1) - SAT(y1, x2) + SAT(y1, x1)
        yy = self.y + self.height
        xx = self.x + self.width
        return (cumul_sum[yy][xx] - cumul_sum[yy][self.x]
                - cumul_sum[self.y][xx] + cumul_sum[self.y][self.x])
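
# A minimal usage sketch. It assumes calc_integral_image(img) returns a
# zero-padded (H+1) x (W+1) summed-area table, so cumul_sum[y][x] holds the
# sum of all pixels strictly above and left of (x, y); that contract is not
# shown in this file and is an assumption here.
if __name__ == '__main__':
    img = [[1, 2],
           [3, 4]]
    ii = calc_integral_image(img)
    # the full 2x2 region should sum to 1 + 2 + 3 + 4 == 10
    print(Region(0, 0, 2, 2).calc_feature(ii))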
try:
from LoggerPlugin import LoggerPlugin
except ImportError:
from RTOC.LoggerPlugin import LoggerPlugin
from .holdPeak_VC820.vc820py.vc820 import MultimeterMessage
import serial
import sys
import traceback
from PyQt5 import uic
from PyQt5 import QtWidgets
import logging as log
log.basicConfig(level=log.INFO)
logging = log.getLogger(__name__)
devicename = "HoldPeak"
default_device = 'COM7'
SERIAL_BAUDRATE = 2400
SERIAL_BYTESIZE = 8
SERIAL_TIMEOUT = 1
SAMPLERATE = 1
class Plugin(LoggerPlugin):
"""
    Records the measurement data of a HoldPeak VC820 multimeter
"""
def __init__(self, *args, **kwargs):
# Plugin setup
super(Plugin, self).__init__(*args, **kwargs)
self.setDeviceName(devicename)
self.smallGUI = True
self._last_value = 0
self._jump_allowed = True
# Data-logger thread
self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)
# self.updater.start()
def __openPort(self, portname=default_device):
# Communication setup
#self.portname = "/dev/ttyUSB0"
#self.portname = "COM7"
self.portname = portname
#################################################################################
# os.system("sudo chmod a+rw /dev/ttyUSB0")
# #######
# uncomment this line if you do not set device rules:
# > sudo nano /etc/udev/rules.d/50-myusb.rules
# > * SUBSYSTEMS=="usb", ATTRS{idVendor}=="067b", ATTRS{idProduct}=="2303", GROUP="users", MODE="0666"
# > [Strg+O, Strg+X]
# > sudo udevadm control --reload
# Ref: http://ask.xmodulo.com/change-usb-device-permission-linux.html
#################################################################################
try:
self._serial_port = serial.Serial(
self.portname, baudrate=SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE, timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)
# dtr and rts settings required for adapter
self._serial_port.dtr = True
self._serial_port.rts = False
# -------------
return True
except Exception:
tb = traceback.format_exc()
logging.debug(tb)
return False
# THIS IS YOUR THREAD
def _updateT(self):
valid, value, unit = self._get_data()
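        # Channel names below are kept in German: Spannung = voltage,
        # Strom = current, Widerstand = resistance, Temperatur = temperature,
        # Kapazität = capacitance, Frequenz = frequency.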
if unit == "V":
datanames = ["Spannung"]
elif unit == "A":
datanames = ["Strom"]
elif unit == "Ohm":
datanames = ["Widerstand"]
elif unit == "°C":
datanames = ["Temperatur"]
elif unit == "F":
datanames = ["Kapazität"]
elif unit == "Hz":
datanames = ["Frequenz"]
else:
datanames = [unit]
if valid:
if abs(self._last_value-value) >= 2 and not self._jump_allowed:
self._jump_allowed = True
else:
self.stream(y=[value], snames=datanames, unit=unit)
self._jump_allowed = False
self._last_value = value
def loadGUI(self):
self.widget = QtWidgets.QWidget()
packagedir = self.getDir(__file__)
uic.loadUi(packagedir+"/holdPeak_VC820/portSelectWidget.ui", self.widget)
# self.setCallbacks()
self.widget.pushButton.clicked.connect(self.__openPortCallback)
self.__openPortCallback()
return self.widget
def __openPortCallback(self):
if self.run:
self.cancel()
self.widget.pushButton.setText("Verbinden")
else:
port = self.widget.comboBox.currentText()
if self.__openPort(port):
self.start()
self.widget.pushButton.setText("Beenden")
else:
self.cancel()
self.widget.pushButton.setText("Fehler")
def _get_data(self):
test = self._serial_port.read(1)
if len(test) != 1:
logging.error("recieved incomplete data, skipping...", file=sys.stderr)
return False, None, None
if MultimeterMessage.check_first_byte(test[0]):
data = test + self._serial_port.read(MultimeterMessage.MESSAGE_LENGTH-1)
else:
logging.error("received incorrect data (%s), skipping..." % test.hex(), file=sys.stderr)
return False, None, None
if len(data) != MultimeterMessage.MESSAGE_LENGTH:
logging.error("received incomplete message (%s), skipping..." %
data.hex(), file=sys.stderr)
return False, None, None
try:
message = MultimeterMessage(data)
#message.value = message.get_base_reading()
except ValueError as e:
logging.debug(e)
logging.error("Error decoding: %s on message %s" % (str(e), data.hex()))
return False, None, None
# logging.debug(str(message))
# return True, message.value, message.unit
return True, round(message.value*message.multiplier, 10), message.base_unit
if __name__ == "__main__":
standalone = Plugin()
standalone.setup()
|
normal
|
{
"blob_id": "c3efaeab600ec9a7a9fffdfad5c9dc1faad8fee7",
"index": 726,
"step-1": "<mask token>\n\n\nclass Plugin(LoggerPlugin):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n self.widget.pushButton.setText('Verbinden')\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText('Beenden')\n else:\n self.cancel()\n self.widget.pushButton.setText('Fehler')\n\n def _get_data(self):\n test = self._serial_port.read(1)\n if len(test) != 1:\n logging.error('recieved incomplete data, skipping...', file=sys\n .stderr)\n return False, None, None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.\n MESSAGE_LENGTH - 1)\n else:\n logging.error('received incorrect data (%s), skipping...' %\n test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error('received incomplete message (%s), skipping...' %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n except ValueError as e:\n logging.debug(e)\n logging.error('Error decoding: %s on message %s' % (str(e),\n data.hex()))\n return False, None, None\n return True, round(message.value * message.multiplier, 10\n ), message.base_unit\n\n\n<mask token>\n",
"step-3": "try:\n from LoggerPlugin import LoggerPlugin\nexcept ImportError:\n from RTOC.LoggerPlugin import LoggerPlugin\n<mask token>\nlog.basicConfig(level=log.INFO)\nlogging = log.getLogger(__name__)\ndevicename = 'HoldPeak'\ndefault_device = 'COM7'\nSERIAL_BAUDRATE = 2400\nSERIAL_BYTESIZE = 8\nSERIAL_TIMEOUT = 1\nSAMPLERATE = 1\n\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n self.widget.pushButton.setText('Verbinden')\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText('Beenden')\n else:\n self.cancel()\n self.widget.pushButton.setText('Fehler')\n\n def _get_data(self):\n test = self._serial_port.read(1)\n if len(test) != 1:\n logging.error('recieved incomplete data, skipping...', file=sys\n .stderr)\n return False, None, None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.\n MESSAGE_LENGTH - 1)\n else:\n logging.error('received incorrect data (%s), skipping...' %\n test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error('received incomplete message (%s), skipping...' %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n except ValueError as e:\n logging.debug(e)\n logging.error('Error decoding: %s on message %s' % (str(e),\n data.hex()))\n return False, None, None\n return True, round(message.value * message.multiplier, 10\n ), message.base_unit\n\n\nif __name__ == '__main__':\n standalone = Plugin()\n standalone.setup()\n",
"step-4": "try:\n from LoggerPlugin import LoggerPlugin\nexcept ImportError:\n from RTOC.LoggerPlugin import LoggerPlugin\nfrom .holdPeak_VC820.vc820py.vc820 import MultimeterMessage\nimport serial\nimport sys\nimport traceback\nfrom PyQt5 import uic\nfrom PyQt5 import QtWidgets\nimport logging as log\nlog.basicConfig(level=log.INFO)\nlogging = log.getLogger(__name__)\ndevicename = 'HoldPeak'\ndefault_device = 'COM7'\nSERIAL_BAUDRATE = 2400\nSERIAL_BYTESIZE = 8\nSERIAL_TIMEOUT = 1\nSAMPLERATE = 1\n\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n self._last_value = 0\n self._jump_allowed = True\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n\n def __openPort(self, portname=default_device):\n self.portname = portname\n try:\n self._serial_port = serial.Serial(self.portname, baudrate=\n SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE,\n timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n self._serial_port.dtr = True\n self._serial_port.rts = False\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == 'V':\n datanames = ['Spannung']\n elif unit == 'A':\n datanames = ['Strom']\n elif unit == 'Ohm':\n datanames = ['Widerstand']\n elif unit == '°C':\n datanames = ['Temperatur']\n elif unit == 'F':\n datanames = ['Kapazität']\n elif unit == 'Hz':\n datanames = ['Frequenz']\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value - value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir + '/holdPeak_VC820/portSelectWidget.ui', self\n .widget)\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n self.widget.pushButton.setText('Verbinden')\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText('Beenden')\n else:\n self.cancel()\n self.widget.pushButton.setText('Fehler')\n\n def _get_data(self):\n test = self._serial_port.read(1)\n if len(test) != 1:\n logging.error('recieved incomplete data, skipping...', file=sys\n .stderr)\n return False, None, None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.\n MESSAGE_LENGTH - 1)\n else:\n logging.error('received incorrect data (%s), skipping...' %\n test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error('received incomplete message (%s), skipping...' %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n except ValueError as e:\n logging.debug(e)\n logging.error('Error decoding: %s on message %s' % (str(e),\n data.hex()))\n return False, None, None\n return True, round(message.value * message.multiplier, 10\n ), message.base_unit\n\n\nif __name__ == '__main__':\n standalone = Plugin()\n standalone.setup()\n",
"step-5": "try:\n from LoggerPlugin import LoggerPlugin\nexcept ImportError:\n from RTOC.LoggerPlugin import LoggerPlugin\n\nfrom .holdPeak_VC820.vc820py.vc820 import MultimeterMessage\nimport serial\nimport sys\nimport traceback\n\nfrom PyQt5 import uic\nfrom PyQt5 import QtWidgets\nimport logging as log\nlog.basicConfig(level=log.INFO)\nlogging = log.getLogger(__name__)\n\ndevicename = \"HoldPeak\"\ndefault_device = 'COM7'\nSERIAL_BAUDRATE = 2400\nSERIAL_BYTESIZE = 8\nSERIAL_TIMEOUT = 1\nSAMPLERATE = 1\n\nclass Plugin(LoggerPlugin):\n \"\"\"\nZeichnet die Messdaten eines HoldPeak VC820 Multimeters auf\n \"\"\"\n def __init__(self, *args, **kwargs):\n # Plugin setup\n super(Plugin, self).__init__(*args, **kwargs)\n self.setDeviceName(devicename)\n self.smallGUI = True\n\n self._last_value = 0\n self._jump_allowed = True\n # Data-logger thread\n self.setPerpetualTimer(self._updateT, samplerate=SAMPLERATE)\n # self.updater.start()\n\n def __openPort(self, portname=default_device):\n # Communication setup\n #self.portname = \"/dev/ttyUSB0\"\n #self.portname = \"COM7\"\n self.portname = portname\n #################################################################################\n # os.system(\"sudo chmod a+rw /dev/ttyUSB0\")\n # #######\n # uncomment this line if you do not set device rules:\n # > sudo nano /etc/udev/rules.d/50-myusb.rules\n # > * SUBSYSTEMS==\"usb\", ATTRS{idVendor}==\"067b\", ATTRS{idProduct}==\"2303\", GROUP=\"users\", MODE=\"0666\"\n # > [Strg+O, Strg+X]\n # > sudo udevadm control --reload\n # Ref: http://ask.xmodulo.com/change-usb-device-permission-linux.html\n #################################################################################\n try:\n self._serial_port = serial.Serial(\n self.portname, baudrate=SERIAL_BAUDRATE, parity='N', bytesize=SERIAL_BYTESIZE, timeout=SERIAL_TIMEOUT, rtscts=1, dsrdtr=1)\n # dtr and rts settings required for adapter\n self._serial_port.dtr = True\n self._serial_port.rts = False\n # -------------\n return True\n except Exception:\n tb = traceback.format_exc()\n logging.debug(tb)\n return False\n\n # THIS IS YOUR THREAD\n def _updateT(self):\n valid, value, unit = self._get_data()\n if unit == \"V\":\n datanames = [\"Spannung\"]\n elif unit == \"A\":\n datanames = [\"Strom\"]\n elif unit == \"Ohm\":\n datanames = [\"Widerstand\"]\n elif unit == \"°C\":\n datanames = [\"Temperatur\"]\n elif unit == \"F\":\n datanames = [\"Kapazität\"]\n elif unit == \"Hz\":\n datanames = [\"Frequenz\"]\n else:\n datanames = [unit]\n if valid:\n if abs(self._last_value-value) >= 2 and not self._jump_allowed:\n self._jump_allowed = True\n else:\n self.stream(y=[value], snames=datanames, unit=unit)\n self._jump_allowed = False\n self._last_value = value\n\n def loadGUI(self):\n self.widget = QtWidgets.QWidget()\n packagedir = self.getDir(__file__)\n uic.loadUi(packagedir+\"/holdPeak_VC820/portSelectWidget.ui\", self.widget)\n # self.setCallbacks()\n self.widget.pushButton.clicked.connect(self.__openPortCallback)\n self.__openPortCallback()\n return self.widget\n\n def __openPortCallback(self):\n if self.run:\n self.cancel()\n self.widget.pushButton.setText(\"Verbinden\")\n else:\n port = self.widget.comboBox.currentText()\n if self.__openPort(port):\n self.start()\n self.widget.pushButton.setText(\"Beenden\")\n else:\n self.cancel()\n self.widget.pushButton.setText(\"Fehler\")\n\n def _get_data(self):\n test = self._serial_port.read(1)\n if len(test) != 1:\n logging.error(\"recieved incomplete data, skipping...\", file=sys.stderr)\n return False, None, 
None\n if MultimeterMessage.check_first_byte(test[0]):\n data = test + self._serial_port.read(MultimeterMessage.MESSAGE_LENGTH-1)\n else:\n logging.error(\"received incorrect data (%s), skipping...\" % test.hex(), file=sys.stderr)\n return False, None, None\n if len(data) != MultimeterMessage.MESSAGE_LENGTH:\n logging.error(\"received incomplete message (%s), skipping...\" %\n data.hex(), file=sys.stderr)\n return False, None, None\n try:\n message = MultimeterMessage(data)\n #message.value = message.get_base_reading()\n except ValueError as e:\n logging.debug(e)\n logging.error(\"Error decoding: %s on message %s\" % (str(e), data.hex()))\n return False, None, None\n # logging.debug(str(message))\n # return True, message.value, message.unit\n return True, round(message.value*message.multiplier, 10), message.base_unit\n\n\nif __name__ == \"__main__\":\n standalone = Plugin()\n standalone.setup()\n",
"step-ids": [
5,
8,
10,
11,
12
]
}
|
[
5,
8,
10,
11,
12
] |
from flask import Flask, request, redirect, url_for, render_template
from flask_modus import Modus
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config[
'SQLALCHEMY_DATABASE_URI'] = "postgres://localhost/flask_one_to_many"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
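# Typical migration workflow from the shell (a sketch; assumes the standard
# Flask-Migrate CLI):
#   flask db init      # once, creates the migrations/ directory
#   flask db migrate   # autogenerate a revision from the models below
#   flask db upgrade   # apply it to the postgres database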
class Student(db.Model):
__tablename__ = "students"
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student',
lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = "excuses"
id = db.Column(db.Integer, primary_key = True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey("students.id"))
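# One-to-many wiring: Excuse.student_id is the foreign key into students.id,
# and Student.excuses (with its 'student' backref) exposes the related rows.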
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=["GET", "POST"])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'],
request.form['last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=["GET", "PATCH"])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route("/students/<int:id>/excuses", methods = ["GET", "POST"])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == "POST":
new_excuse = Excuse(content = request.form.get("content"), is_believable = request.form.get("is_believable"), student_id = id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for("excuses_index", id = id))
excuses_list = found_student.excuses.all()
return render_template("excuses/index.html", excuses=excuses_list, student= found_student)
@app.route("/students/<int:id>/excuses/new")
def new_excuse(id):
return render_template("/excuses/new.html", id = id)
@app.route("/students/<int:id>/excuses/<int:excuse_id>/edit", methods = ["GET", "PATCH","DELETE"])
def edit_excuse(id,excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id = found_student.id))
elif request.method == b"PATCH":
found_excuse.content = request.form.get("content")
found_excuse.is_believable = request.form.get("is_believable")
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for("excuses_index", id = found_student.id))
# return render_template("excuses/index.html",excuses = excuses_list, student = found_student)
return render_template("excuses/edit.html",excuse = found_excuse, student = found_student)
|
normal
|
{
"blob_id": "026e06e777d64f8724ec5e89a7829b3a42a25d6b",
"index": 800,
"step-1": "<mask token>\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\[email protected]('/')\ndef root():\n return redirect(url_for('index'))\n\n\[email protected]('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\[email protected]('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\[email protected]('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\[email protected]('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\[email protected]('/students/<int:id>/excuses', methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\[email protected]('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\[email protected]('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-2": "<mask token>\nMigrate(app, db)\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\[email protected]('/')\ndef root():\n return redirect(url_for('index'))\n\n\[email protected]('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\[email protected]('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\[email protected]('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\[email protected]('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\[email protected]('/students/<int:id>/excuses', methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\[email protected]('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\[email protected]('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgres://localhost/flask_one_to_many'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\nmodus = Modus(app)\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\[email protected]('/')\ndef root():\n return redirect(url_for('index'))\n\n\[email protected]('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\[email protected]('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\[email protected]('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\[email protected]('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\[email protected]('/students/<int:id>/excuses', methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\[email protected]('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\[email protected]('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-4": "from flask import Flask, request, redirect, url_for, render_template\nfrom flask_modus import Modus\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgres://localhost/flask_one_to_many'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\nmodus = Modus(app)\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n\nclass Student(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student', lazy='dynamic')\n\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\n\nclass Excuse(db.Model):\n __tablename__ = 'excuses'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey('students.id'))\n\n\[email protected]('/')\ndef root():\n return redirect(url_for('index'))\n\n\[email protected]('/students', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'], request.form[\n 'last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\[email protected]('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\[email protected]('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\[email protected]('/students/<int:id>', methods=['GET', 'PATCH'])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\n\[email protected]('/students/<int:id>/excuses', methods=['GET', 'POST'])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == 'POST':\n new_excuse = Excuse(content=request.form.get('content'),\n is_believable=request.form.get('is_believable'), student_id=id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=id))\n excuses_list = found_student.excuses.all()\n return render_template('excuses/index.html', excuses=excuses_list,\n student=found_student)\n\n\[email protected]('/students/<int:id>/excuses/new')\ndef new_excuse(id):\n return render_template('/excuses/new.html', id=id)\n\n\[email protected]('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[\n 'GET', 'PATCH', 'DELETE'])\ndef edit_excuse(id, excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n elif request.method == b'PATCH':\n found_excuse.content = request.form.get('content')\n found_excuse.is_believable = request.form.get('is_believable')\n db.session.add(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id=found_student.id))\n return 
render_template('excuses/edit.html', excuse=found_excuse,\n student=found_student)\n",
"step-5": "from flask import Flask, request, redirect, url_for, render_template\nfrom flask_modus import Modus\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\napp = Flask(__name__)\napp.config[\n 'SQLALCHEMY_DATABASE_URI'] = \"postgres://localhost/flask_one_to_many\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\nmodus = Modus(app)\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n\nclass Student(db.Model):\n\n __tablename__ = \"students\"\n\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.Text)\n last_name = db.Column(db.Text)\n excuses = db.relationship('Excuse', backref='student',\n lazy='dynamic')\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n\nclass Excuse(db.Model):\n __tablename__ = \"excuses\"\n\n id = db.Column(db.Integer, primary_key = True)\n content = db.Column(db.Text)\n is_believable = db.Column(db.Text)\n student_id = db.Column(db.Integer, db.ForeignKey(\"students.id\"))\n \n\n\n\n\[email protected]('/')\ndef root():\n return redirect(url_for('index'))\n\n\[email protected]('/students', methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == 'POST':\n new_student = Student(request.form['first_name'],\n request.form['last_name'])\n db.session.add(new_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/index.html', students=Student.query.all())\n\n\[email protected]('/students/new')\ndef new():\n return render_template('students/new.html')\n\n\[email protected]('/students/<int:id>/edit')\ndef edit(id):\n return render_template('students/edit.html', student=Student.query.get(id))\n\n\[email protected]('/students/<int:id>', methods=[\"GET\", \"PATCH\"])\ndef show(id):\n found_student = Student.query.get(id)\n if request.method == b'PATCH':\n found_student.first_name = request.form['first_name']\n found_student.last_name = request.form['last_name']\n db.session.add(found_student)\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('students/show.html', student=found_student)\n\[email protected](\"/students/<int:id>/excuses\", methods = [\"GET\", \"POST\"])\ndef excuses_index(id):\n found_student = Student.query.get(id)\n if request.method == \"POST\":\n new_excuse = Excuse(content = request.form.get(\"content\"), is_believable = request.form.get(\"is_believable\"), student_id = id)\n db.session.add(new_excuse)\n db.session.commit()\n return redirect(url_for(\"excuses_index\", id = id))\n\n \n excuses_list = found_student.excuses.all()\n return render_template(\"excuses/index.html\", excuses=excuses_list, student= found_student)\n\n\n\[email protected](\"/students/<int:id>/excuses/new\")\ndef new_excuse(id):\n\n return render_template(\"/excuses/new.html\", id = id)\n\n\[email protected](\"/students/<int:id>/excuses/<int:excuse_id>/edit\", methods = [\"GET\", \"PATCH\",\"DELETE\"])\ndef edit_excuse(id,excuse_id):\n print(id)\n found_student = Student.query.get(id)\n found_excuse = Excuse.query.get(excuse_id)\n excuses_list = found_student.excuses.all()\n if request.method == b'DELETE':\n db.session.delete(found_excuse)\n db.session.commit()\n return redirect(url_for('excuses_index', id = found_student.id))\n elif request.method == b\"PATCH\":\n \n found_excuse.content = request.form.get(\"content\")\n found_excuse.is_believable = request.form.get(\"is_believable\")\n \n db.session.add(found_excuse)\n db.session.commit()\n\n return 
redirect(url_for(\"excuses_index\", id = found_student.id))\n \n # return render_template(\"excuses/index.html\",excuses = excuses_list, student = found_student)\n return render_template(\"excuses/edit.html\",excuse = found_excuse, student = found_student)",
"step-ids": [
13,
14,
15,
16,
17
]
}
|
[
13,
14,
15,
16,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def intToRoman(self, num):
normalDic = {(1000): 'M', (500): 'D', (100): 'C', (50): 'L', (10):
'X', (5): 'V', (1): 'I'}
specialDic = {'41': 'IV', '91': 'IX', '42': 'XL', '92': 'XC', '43':
'CD', '93': 'CM'}
roman = ''
remainders = ['4', '9']
divisors = [1000, 500, 100, 50, 10, 5, 1]
for i, divisor in enumerate(divisors):
            quotient = num // divisor
if quotient > 0:
roman += normalDic[divisor] * quotient
num = num % divisor
if str(num)[0] in remainders:
roman += specialDic[str(num)[0] + str(len(str(num)))]
num -= int(str(num)[0]) * 10 ** (len(str(num)) - 1)
return roman
<|reserved_special_token_1|>
# Given an integer, convert it to a roman numeral.
# Input is guaranteed to be within the range from 1 to 3999.
class Solution:
# @param {integer} num
# @return {string}
def intToRoman(self, num):
normalDic = {
1000: 'M',
500: 'D',
100: 'C',
50: 'L',
10: 'X',
5: 'V',
1: 'I'
}
specialDic = {
'41': 'IV', # 4
'91': 'IX', # 9
'42': 'XL', # 40
'92': 'XC', # 90
'43': 'CD', # 400
'93': 'CM', # 900
}
roman = ""
remainders = ['4', '9']
divisors = [1000, 500, 100, 50, 10, 5, 1]
for i, divisor in enumerate(divisors):
            quotient = num//divisor
if quotient > 0:
roman += normalDic[divisor] * quotient
num = num % divisor
if str(num)[0] in remainders:
roman += specialDic[str(num)[0] + str(len(str(num)))]
num -= int(str(num)[0]) * (10 ** (len(str(num)) - 1))
return roman
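
# Worked example (sketch): intToRoman(1994)
#   1000 -> 'M' (994 left), leading '9' -> 'CM' (94 left),
#   leading '9' -> 'XC' (4 left), leading '4' -> 'IV'  => 'MCMXCIV'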
|
flexible
|
{
"blob_id": "7de06772a1024a81193ac69a1110ad2e8b7f64ac",
"index": 9085,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def intToRoman(self, num):\n normalDic = {(1000): 'M', (500): 'D', (100): 'C', (50): 'L', (10):\n 'X', (5): 'V', (1): 'I'}\n specialDic = {'41': 'IV', '91': 'IX', '42': 'XL', '92': 'XC', '43':\n 'CD', '93': 'CM'}\n roman = ''\n remainders = ['4', '9']\n divisors = [1000, 500, 100, 50, 10, 5, 1]\n for i, divisor in enumerate(divisors):\n quotient = num / divisor\n if quotient > 0:\n roman += normalDic[divisor] * quotient\n num = num % divisor\n if str(num)[0] in remainders:\n roman += specialDic[str(num)[0] + str(len(str(num)))]\n num -= int(str(num)[0]) * 10 ** (len(str(num)) - 1)\n return roman\n",
"step-4": "# Given an integer, convert it to a roman numeral.\n\n# Input is guaranteed to be within the range from 1 to 3999.\n\nclass Solution:\n # @param {integer} num\n # @return {string}\n def intToRoman(self, num):\n normalDic = {\n 1000: 'M',\n 500: 'D',\n 100: 'C',\n 50: 'L',\n 10: 'X',\n 5: 'V',\n 1: 'I'\n }\n specialDic = {\n '41': 'IV', # 4\n '91': 'IX', # 9\n '42': 'XL', # 40\n '92': 'XC', # 90\n '43': 'CD', # 400\n '93': 'CM', # 900\n }\n\n roman = \"\"\n remainders = ['4', '9']\n divisors = [1000, 500, 100, 50, 10, 5, 1]\n for i, divisor in enumerate(divisors):\n quotient = num/divisor\n if quotient > 0:\n roman += normalDic[divisor] * quotient\n num = num % divisor\n\n if str(num)[0] in remainders:\n roman += specialDic[str(num)[0] + str(len(str(num)))]\n num -= int(str(num)[0]) * (10 ** (len(str(num)) - 1))\n\n return roman",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
from week2.Stack import Stack
class TransactionStack:
def __init__(self):
self.stack = Stack()
with open("json_file/Transaction_Stack.json") as data:
try:
temp = json.load(data)
except Exception:
pass
else:
for i in temp:
self.stack.push(i)
def transaction_stack(self, transaction, customer_name, company_name, no_of_share, cost, time):
new_transaction = {"transaction": transaction, "customer_name": customer_name, "company_name": company_name,
"no_of_share": no_of_share, "cost": cost, "time": time}
self.stack.push(new_transaction)
def save_transaction(self):
temp1 = []
size = self.stack.size()
for i in range(size):
temp1.append(self.stack.pop())
with open("Transaction_stack.json", 'w') as data:
json.dump(temp1, data)
# Main method
if __name__ == "__main__":
pass
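    # Example usage (sketch; argument values are hypothetical):
    #   ts = TransactionStack()
    #   ts.transaction_stack("BUY", "Alice", "ACME", 10, 99.5, "2021-01-01T10:00")
    #   ts.save_transaction()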
|
normal
|
{
"blob_id": "30a2358e8396d24d6c3cd72d04321aa9f9f83995",
"index": 8233,
"step-1": "<mask token>\n\n\nclass TransactionStack:\n <mask token>\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TransactionStack:\n\n def __init__(self):\n self.stack = Stack()\n with open('json_file/Transaction_Stack.json') as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open('Transaction_stack.json', 'w') as data:\n json.dump(temp1, data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TransactionStack:\n\n def __init__(self):\n self.stack = Stack()\n with open('json_file/Transaction_Stack.json') as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open('Transaction_stack.json', 'w') as data:\n json.dump(temp1, data)\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "import json\nfrom week2.Stack import Stack\n\n\nclass TransactionStack:\n\n def __init__(self):\n self.stack = Stack()\n with open('json_file/Transaction_Stack.json') as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open('Transaction_stack.json', 'w') as data:\n json.dump(temp1, data)\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "import json\n\nfrom week2.Stack import Stack\n\n\nclass TransactionStack:\n def __init__(self):\n\n self.stack = Stack()\n with open(\"json_file/Transaction_Stack.json\") as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name, no_of_share, cost, time):\n\n new_transaction = {\"transaction\": transaction, \"customer_name\": customer_name, \"company_name\": company_name,\n \"no_of_share\": no_of_share, \"cost\": cost, \"time\": time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open(\"Transaction_stack.json\", 'w') as data:\n json.dump(temp1, data)\n\n\n# Main method\nif __name__ == \"__main__\":\n pass\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class SnapshotMismatchError(Exception):
"""
Main snapshot mismatch exception.
"""
def setup_logging(level: int) ->logging.Logger:
"""
Get Logger instance.
Arguments:
level -- Log level
Returns:
        Overridden Logger.
"""
logger = logging.getLogger(LOGGER_NAME)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=
'%H:%M:%S')
stream_handler.setFormatter(formatter)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
@dataclass
class CLINamespace:
"""
CLI namespace.
"""
debug: bool
path: Path
filter: List[str]
exit_on_error: bool
def parse_args() ->CLINamespace:
"""
Parse CLI arguments.
"""
parser = argparse.ArgumentParser(__file__)
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-x', '--exit-on-error', action='store_true')
parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /
'mypy_boto3_output')
parser.add_argument('filter', nargs='*')
args = parser.parse_args()
return CLINamespace(debug=args.debug, path=args.path, filter=args.
filter, exit_on_error=args.exit_on_error)
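# Example invocation (sketch; the script filename is hypothetical):
#   python check_output.py --debug --path ./mypy_boto3_output ec2 s3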
<|reserved_special_token_0|>
def run_pyright(path: Path) ->None:
"""
Check output with pyright.
"""
with tempfile.NamedTemporaryFile('w+b') as f:
try:
subprocess.check_call(['npx', 'pyright', path.as_posix(),
'--outputjson'], stderr=subprocess.DEVNULL, stdout=f)
return
except subprocess.CalledProcessError:
pass
temp_path = Path(f.name)
output = temp_path.read_text()
data = json.loads(output).get('generalDiagnostics', [])
errors = []
for error in data:
message = error.get('message', '')
if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):
continue
errors.append(error)
if errors:
messages = []
for error in errors:
messages.append(
f"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}"
)
raise SnapshotMismatchError('\n'.join(messages))
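# For reference: `pyright --outputjson` reports its findings under a
# "generalDiagnostics" list whose entries carry "file", "message" and a
# zero-based "range" -- exactly the fields consumed above.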
<|reserved_special_token_0|>
def run_call(path: Path) ->None:
"""
Check output by running it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, path.as_posix()], stdout=
subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def run_import(path: Path) ->None:
"""
Check output by installing and importing it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, '-m', 'pip', 'install',
'--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)
if (path / '__main__.py').exists():
subprocess.check_call([sys.executable, '-c',
f'import {path.name}'], stdout=subprocess.DEVNULL)
subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',
'--no-input', '-y', path.name], stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def is_package_dir(path: Path) ->bool:
"""
Check whether `path` contains a service package.
"""
if not path.is_dir():
return False
if path.name.endswith('.egg-info'):
return False
if (path / '__init__.pyi').exists():
return True
return False
def check_snapshot(path: Path) ->None:
"""
    Check the package against the type-checker snapshot.
Raises:
SnapshotMismatchError -- If snapshot is not equal to current output.
"""
logger = logging.getLogger(LOGGER_NAME)
logger.debug(f'Running flake8 for {path.name} ...')
run_flake8(path)
logger.debug(f'Running mypy for {path.name} ...')
run_mypy(path)
logger.debug(f'Running pyright for {path.name} ...')
run_pyright(path)
if (path / '__main__.py').exists():
logger.debug(f'Running call for {path.name} ...')
run_call(path)
logger.debug(f'Running import for {path.name} ...')
run_import(path)
def find_package_path(path: Path) ->Optional[Path]:
"""
Find package directory inside `path`.
"""
for package_path in path.iterdir():
if is_package_dir(package_path):
return package_path
def main() ->None:
"""
Run main logic.
"""
args = parse_args()
logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)
has_errors = False
for folder in sorted(args.path.iterdir()):
if not folder.name.endswith('_package'):
continue
if args.filter and not any(s in folder.as_posix() for s in args.filter
):
continue
package_path = find_package_path(folder)
if not package_path:
continue
logger.info(f'Checking {folder.name}/{package_path.name} ...')
try:
check_snapshot(package_path)
except SnapshotMismatchError as e:
logger.error(e)
has_errors = True
if args.exit_on_error:
break
if has_errors:
sys.exit(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SnapshotMismatchError(Exception):
"""
Main snapshot mismatch exception.
"""
def setup_logging(level: int) ->logging.Logger:
"""
Get Logger instance.
Arguments:
level -- Log level
Returns:
        Overridden Logger.
"""
logger = logging.getLogger(LOGGER_NAME)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=
'%H:%M:%S')
stream_handler.setFormatter(formatter)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
@dataclass
class CLINamespace:
"""
CLI namespace.
"""
debug: bool
path: Path
filter: List[str]
exit_on_error: bool
def parse_args() ->CLINamespace:
"""
Parse CLI arguments.
"""
parser = argparse.ArgumentParser(__file__)
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-x', '--exit-on-error', action='store_true')
parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /
'mypy_boto3_output')
parser.add_argument('filter', nargs='*')
args = parser.parse_args()
return CLINamespace(debug=args.debug, path=args.path, filter=args.
filter, exit_on_error=args.exit_on_error)
def run_flake8(path: Path) ->None:
"""
Check output with flake8.
"""
with tempfile.NamedTemporaryFile('w+b') as f:
try:
subprocess.check_call([sys.executable, '-m', 'flake8',
'--ignore',
'E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803'
, path.as_posix()], stderr=f, stdout=f)
except subprocess.CalledProcessError:
temp_path = Path(f.name)
output = temp_path.read_text()
raise SnapshotMismatchError(output)
def run_pyright(path: Path) ->None:
"""
Check output with pyright.
"""
with tempfile.NamedTemporaryFile('w+b') as f:
try:
subprocess.check_call(['npx', 'pyright', path.as_posix(),
'--outputjson'], stderr=subprocess.DEVNULL, stdout=f)
return
except subprocess.CalledProcessError:
pass
temp_path = Path(f.name)
output = temp_path.read_text()
data = json.loads(output).get('generalDiagnostics', [])
errors = []
for error in data:
message = error.get('message', '')
if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):
continue
errors.append(error)
if errors:
messages = []
for error in errors:
messages.append(
f"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}"
)
raise SnapshotMismatchError('\n'.join(messages))
<|reserved_special_token_0|>
def run_call(path: Path) ->None:
"""
Check output by running it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, path.as_posix()], stdout=
subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def run_import(path: Path) ->None:
"""
Check output by installing and importing it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, '-m', 'pip', 'install',
'--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)
if (path / '__main__.py').exists():
subprocess.check_call([sys.executable, '-c',
f'import {path.name}'], stdout=subprocess.DEVNULL)
subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',
'--no-input', '-y', path.name], stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def is_package_dir(path: Path) ->bool:
"""
Check whether `path` contains a service package.
"""
if not path.is_dir():
return False
if path.name.endswith('.egg-info'):
return False
if (path / '__init__.pyi').exists():
return True
return False
def check_snapshot(path: Path) ->None:
"""
    Check the package against the type-checker snapshot.
Raises:
SnapshotMismatchError -- If snapshot is not equal to current output.
"""
logger = logging.getLogger(LOGGER_NAME)
logger.debug(f'Running flake8 for {path.name} ...')
run_flake8(path)
logger.debug(f'Running mypy for {path.name} ...')
run_mypy(path)
logger.debug(f'Running pyright for {path.name} ...')
run_pyright(path)
if (path / '__main__.py').exists():
logger.debug(f'Running call for {path.name} ...')
run_call(path)
logger.debug(f'Running import for {path.name} ...')
run_import(path)
def find_package_path(path: Path) ->Optional[Path]:
"""
Find package directory inside `path`.
"""
for package_path in path.iterdir():
if is_package_dir(package_path):
return package_path
def main() ->None:
"""
Run main logic.
"""
args = parse_args()
logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)
has_errors = False
for folder in sorted(args.path.iterdir()):
if not folder.name.endswith('_package'):
continue
if args.filter and not any(s in folder.as_posix() for s in args.filter
):
continue
package_path = find_package_path(folder)
if not package_path:
continue
logger.info(f'Checking {folder.name}/{package_path.name} ...')
try:
check_snapshot(package_path)
except SnapshotMismatchError as e:
logger.error(e)
has_errors = True
if args.exit_on_error:
break
if has_errors:
sys.exit(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SnapshotMismatchError(Exception):
"""
Main snapshot mismatch exception.
"""
def setup_logging(level: int) ->logging.Logger:
"""
Get Logger instance.
Arguments:
level -- Log level
Returns:
Overriden Logger.
"""
logger = logging.getLogger(LOGGER_NAME)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=
'%H:%M:%S')
stream_handler.setFormatter(formatter)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
@dataclass
class CLINamespace:
"""
CLI namespace.
"""
debug: bool
path: Path
filter: List[str]
exit_on_error: bool
def parse_args() ->CLINamespace:
"""
Parse CLI arguments.
"""
parser = argparse.ArgumentParser(__file__)
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-x', '--exit-on-error', action='store_true')
parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /
'mypy_boto3_output')
parser.add_argument('filter', nargs='*')
args = parser.parse_args()
return CLINamespace(debug=args.debug, path=args.path, filter=args.
filter, exit_on_error=args.exit_on_error)
def run_flake8(path: Path) ->None:
"""
Check output with flake8.
"""
with tempfile.NamedTemporaryFile('w+b') as f:
try:
subprocess.check_call([sys.executable, '-m', 'flake8',
'--ignore',
'E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803'
, path.as_posix()], stderr=f, stdout=f)
except subprocess.CalledProcessError:
temp_path = Path(f.name)
output = temp_path.read_text()
raise SnapshotMismatchError(output)
def run_pyright(path: Path) ->None:
"""
Check output with pyright.
"""
with tempfile.NamedTemporaryFile('w+b') as f:
try:
subprocess.check_call(['npx', 'pyright', path.as_posix(),
'--outputjson'], stderr=subprocess.DEVNULL, stdout=f)
return
except subprocess.CalledProcessError:
pass
temp_path = Path(f.name)
output = temp_path.read_text()
data = json.loads(output).get('generalDiagnostics', [])
errors = []
for error in data:
message = error.get('message', '')
if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):
continue
errors.append(error)
if errors:
messages = []
for error in errors:
messages.append(
f"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}"
)
raise SnapshotMismatchError('\n'.join(messages))
def run_mypy(path: Path) ->None:
"""
Check output with mypy.
"""
try:
output = subprocess.check_output([sys.executable, '-m', 'mypy',
path.as_posix()], stderr=subprocess.STDOUT, encoding='utf8')
except subprocess.CalledProcessError as e:
output = e.output
errors = []
for message in output.splitlines():
if not message or message.startswith('Found'):
continue
if any(imsg in message for imsg in IGNORE_MYPY_ERRORS):
continue
errors.append(message)
if errors:
raise SnapshotMismatchError('\n'.join(errors)) from None
def run_call(path: Path) ->None:
"""
Check output by running it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, path.as_posix()], stdout=
subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def run_import(path: Path) ->None:
"""
Check output by installing and importing it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, '-m', 'pip', 'install',
'--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)
if (path / '__main__.py').exists():
subprocess.check_call([sys.executable, '-c',
f'import {path.name}'], stdout=subprocess.DEVNULL)
subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',
'--no-input', '-y', path.name], stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def is_package_dir(path: Path) ->bool:
"""
Check whether `path` contains a service package.
"""
if not path.is_dir():
return False
if path.name.endswith('.egg-info'):
return False
if (path / '__init__.pyi').exists():
return True
return False
def check_snapshot(path: Path) ->None:
"""
Check package type checkers snapshot.
Raises:
SnapshotMismatchError -- If snapshot is not equal to current output.
"""
logger = logging.getLogger(LOGGER_NAME)
logger.debug(f'Running flake8 for {path.name} ...')
run_flake8(path)
logger.debug(f'Running mypy for {path.name} ...')
run_mypy(path)
logger.debug(f'Running pyright for {path.name} ...')
run_pyright(path)
if (path / '__main__.py').exists():
logger.debug(f'Running call for {path.name} ...')
run_call(path)
logger.debug(f'Running import for {path.name} ...')
run_import(path)
def find_package_path(path: Path) ->Optional[Path]:
"""
Find package directory inside `path`.
"""
for package_path in path.iterdir():
if is_package_dir(package_path):
return package_path
def main() ->None:
"""
Run main logic.
"""
args = parse_args()
logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)
has_errors = False
for folder in sorted(args.path.iterdir()):
if not folder.name.endswith('_package'):
continue
if args.filter and not any(s in folder.as_posix() for s in args.filter
):
continue
package_path = find_package_path(folder)
if not package_path:
continue
logger.info(f'Checking {folder.name}/{package_path.name} ...')
try:
check_snapshot(package_path)
except SnapshotMismatchError as e:
logger.error(e)
has_errors = True
if args.exit_on_error:
break
if has_errors:
sys.exit(1)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROOT_PATH = Path(__file__).parent.parent.resolve()
LOGGER_NAME = 'check_output'
IGNORE_PYRIGHT_ERRORS = (
'"get_paginator" is marked as overload, but no implementation is provided',
'"get_waiter" is marked as overload, but no implementation is provided',
'Method "paginate" overrides class "Paginator" in an incompatible manner',
'Method "wait" overrides class "Waiter" in an incompatible manner',
'define variable "items" in incompatible way',
'define variable "values" in incompatible way', 'must return value',
'Import "types_aiobotocore_', 'Import "mypy_boto3_')
IGNORE_MYPY_ERRORS = (
'Signature of "create_client" incompatible with supertype "Session"',
'Signature of "paginate" incompatible with supertype "Paginator"',
'Signature of "wait" incompatible with supertype "Waiter"', 'note:')
class SnapshotMismatchError(Exception):
"""
Main snapshot mismatch exception.
"""
def setup_logging(level: int) ->logging.Logger:
"""
Get Logger instance.
Arguments:
level -- Log level
Returns:
Overriden Logger.
"""
logger = logging.getLogger(LOGGER_NAME)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s %(message)s', datefmt=
'%H:%M:%S')
stream_handler.setFormatter(formatter)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
@dataclass
class CLINamespace:
"""
CLI namespace.
"""
debug: bool
path: Path
filter: List[str]
exit_on_error: bool
def parse_args() ->CLINamespace:
"""
Parse CLI arguments.
"""
parser = argparse.ArgumentParser(__file__)
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-x', '--exit-on-error', action='store_true')
parser.add_argument('-p', '--path', type=Path, default=ROOT_PATH /
'mypy_boto3_output')
parser.add_argument('filter', nargs='*')
args = parser.parse_args()
return CLINamespace(debug=args.debug, path=args.path, filter=args.
filter, exit_on_error=args.exit_on_error)
def run_flake8(path: Path) ->None:
"""
Check output with flake8.
"""
with tempfile.NamedTemporaryFile('w+b') as f:
try:
subprocess.check_call([sys.executable, '-m', 'flake8',
'--ignore',
'E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803'
, path.as_posix()], stderr=f, stdout=f)
except subprocess.CalledProcessError:
temp_path = Path(f.name)
output = temp_path.read_text()
raise SnapshotMismatchError(output)
def run_pyright(path: Path) ->None:
"""
Check output with pyright.
"""
with tempfile.NamedTemporaryFile('w+b') as f:
try:
subprocess.check_call(['npx', 'pyright', path.as_posix(),
'--outputjson'], stderr=subprocess.DEVNULL, stdout=f)
return
except subprocess.CalledProcessError:
pass
temp_path = Path(f.name)
output = temp_path.read_text()
data = json.loads(output).get('generalDiagnostics', [])
errors = []
for error in data:
message = error.get('message', '')
if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):
continue
errors.append(error)
if errors:
messages = []
for error in errors:
messages.append(
f"{error['file']}:{error['range']['start']['line']} {error.get('message', '')}"
)
raise SnapshotMismatchError('\n'.join(messages))
def run_mypy(path: Path) ->None:
"""
Check output with mypy.
"""
try:
output = subprocess.check_output([sys.executable, '-m', 'mypy',
path.as_posix()], stderr=subprocess.STDOUT, encoding='utf8')
except subprocess.CalledProcessError as e:
output = e.output
errors = []
for message in output.splitlines():
if not message or message.startswith('Found'):
continue
if any(imsg in message for imsg in IGNORE_MYPY_ERRORS):
continue
errors.append(message)
if errors:
raise SnapshotMismatchError('\n'.join(errors)) from None
def run_call(path: Path) ->None:
"""
Check output by running it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, path.as_posix()], stdout=
subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def run_import(path: Path) ->None:
"""
Check output by installing and importing it.
"""
if not (path / '__main__.py').exists():
return
try:
subprocess.check_call([sys.executable, '-m', 'pip', 'install',
'--no-input', path.parent.as_posix()], stdout=subprocess.DEVNULL)
if (path / '__main__.py').exists():
subprocess.check_call([sys.executable, '-c',
f'import {path.name}'], stdout=subprocess.DEVNULL)
subprocess.check_call([sys.executable, '-m', 'pip', 'uninstall',
'--no-input', '-y', path.name], stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f'Path {path} cannot be imported: {e}'
) from None
def is_package_dir(path: Path) ->bool:
"""
Check whether `path` contains a service package.
"""
if not path.is_dir():
return False
if path.name.endswith('.egg-info'):
return False
if (path / '__init__.pyi').exists():
return True
return False
def check_snapshot(path: Path) ->None:
"""
Check package type checkers snapshot.
Raises:
SnapshotMismatchError -- If snapshot is not equal to current output.
"""
logger = logging.getLogger(LOGGER_NAME)
logger.debug(f'Running flake8 for {path.name} ...')
run_flake8(path)
logger.debug(f'Running mypy for {path.name} ...')
run_mypy(path)
logger.debug(f'Running pyright for {path.name} ...')
run_pyright(path)
if (path / '__main__.py').exists():
logger.debug(f'Running call for {path.name} ...')
run_call(path)
logger.debug(f'Running import for {path.name} ...')
run_import(path)
def find_package_path(path: Path) ->Optional[Path]:
"""
Find package directory inside `path`.
"""
for package_path in path.iterdir():
if is_package_dir(package_path):
return package_path
def main() ->None:
"""
Run main logic.
"""
args = parse_args()
logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)
has_errors = False
for folder in sorted(args.path.iterdir()):
if not folder.name.endswith('_package'):
continue
if args.filter and not any(s in folder.as_posix() for s in args.filter
):
continue
package_path = find_package_path(folder)
if not package_path:
continue
logger.info(f'Checking {folder.name}/{package_path.name} ...')
try:
check_snapshot(package_path)
except SnapshotMismatchError as e:
logger.error(e)
has_errors = True
if args.exit_on_error:
break
if has_errors:
sys.exit(1)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
Checker of generated packages.
- [x] import generated package
- [x] flake8
- [x] pyright
- [x] mypy
"""
import argparse
import json
import logging
import subprocess
import sys
import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
ROOT_PATH = Path(__file__).parent.parent.resolve()
LOGGER_NAME = "check_output"
IGNORE_PYRIGHT_ERRORS = (
'"get_paginator" is marked as overload, but no implementation is provided',
'"get_waiter" is marked as overload, but no implementation is provided',
# 'Expected type arguments for generic class "ResourceCollection"',
# 'Type "None" cannot be assigned to type',
# '"__next__" is not present',
# 'Import "boto3.s3.transfer" could not be resolved',
# "is partially unknown",
'Method "paginate" overrides class "Paginator" in an incompatible manner',
'Method "wait" overrides class "Waiter" in an incompatible manner',
'define variable "items" in incompatible way',
'define variable "values" in incompatible way',
"must return value",
'Import "types_aiobotocore_',
'Import "mypy_boto3_',
)
IGNORE_MYPY_ERRORS = (
'Signature of "create_client" incompatible with supertype "Session"',
'Signature of "paginate" incompatible with supertype "Paginator"',
'Signature of "wait" incompatible with supertype "Waiter"',
"note:",
)
class SnapshotMismatchError(Exception):
"""
Main snapshot mismatch exception.
"""
def setup_logging(level: int) -> logging.Logger:
"""
Get Logger instance.
Arguments:
level -- Log level
Returns:
        Overridden Logger.
"""
logger = logging.getLogger(LOGGER_NAME)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s %(message)s", datefmt="%H:%M:%S")
stream_handler.setFormatter(formatter)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
@dataclass
class CLINamespace:
"""
CLI namespace.
"""
debug: bool
path: Path
filter: List[str]
exit_on_error: bool
def parse_args() -> CLINamespace:
"""
Parse CLI arguments.
"""
parser = argparse.ArgumentParser(__file__)
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-x", "--exit-on-error", action="store_true")
parser.add_argument("-p", "--path", type=Path, default=ROOT_PATH / "mypy_boto3_output")
parser.add_argument("filter", nargs="*")
args = parser.parse_args()
return CLINamespace(
debug=args.debug,
path=args.path,
filter=args.filter,
exit_on_error=args.exit_on_error,
)
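# e.g. "check_output.py -d s3 ec2" (hypothetical script name) parses to
# CLINamespace(debug=True, path=ROOT_PATH / "mypy_boto3_output",
#              filter=["s3", "ec2"], exit_on_error=False).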
def run_flake8(path: Path) -> None:
"""
Check output with flake8.
"""
with tempfile.NamedTemporaryFile("w+b") as f:
try:
subprocess.check_call(
[
sys.executable,
"-m",
"flake8",
"--ignore",
"E203,W503,E501,D200,D107,D401,D105,D205,D400,D101,D102,D403,N802,N803",
path.as_posix(),
],
stderr=f,
stdout=f,
)
except subprocess.CalledProcessError:
temp_path = Path(f.name)
output = temp_path.read_text()
raise SnapshotMismatchError(output)
def run_pyright(path: Path) -> None:
"""
Check output with pyright.
"""
with tempfile.NamedTemporaryFile("w+b") as f:
try:
subprocess.check_call(
["npx", "pyright", path.as_posix(), "--outputjson"],
stderr=subprocess.DEVNULL,
stdout=f,
)
return
except subprocess.CalledProcessError:
pass
temp_path = Path(f.name)
output = temp_path.read_text()
data = json.loads(output).get("generalDiagnostics", [])
errors = []
for error in data:
message = error.get("message", "")
if any(imsg in message for imsg in IGNORE_PYRIGHT_ERRORS):
continue
errors.append(error)
if errors:
messages = []
for error in errors:
messages.append(
f'{error["file"]}:{error["range"]["start"]["line"]} {error.get("message", "")}'
)
raise SnapshotMismatchError("\n".join(messages))
def run_mypy(path: Path) -> None:
"""
Check output with mypy.
"""
try:
output = subprocess.check_output(
[sys.executable, "-m", "mypy", path.as_posix()],
stderr=subprocess.STDOUT,
encoding="utf8",
)
except subprocess.CalledProcessError as e:
output = e.output
errors = []
for message in output.splitlines():
if not message or message.startswith("Found"):
continue
if any(imsg in message for imsg in IGNORE_MYPY_ERRORS):
continue
errors.append(message)
if errors:
raise SnapshotMismatchError("\n".join(errors)) from None
def run_call(path: Path) -> None:
"""
Check output by running it.
"""
if not (path / "__main__.py").exists():
return
try:
subprocess.check_call([sys.executable, path.as_posix()], stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
        raise SnapshotMismatchError(f"Path {path} cannot be run: {e}") from None
def run_import(path: Path) -> None:
"""
Check output by installing and importing it.
"""
if not (path / "__main__.py").exists():
return
try:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "--no-input", path.parent.as_posix()],
stdout=subprocess.DEVNULL,
)
if (path / "__main__.py").exists():
subprocess.check_call(
[sys.executable, "-c", f"import {path.name}"],
stdout=subprocess.DEVNULL,
)
subprocess.check_call(
[sys.executable, "-m", "pip", "uninstall", "--no-input", "-y", path.name],
stdout=subprocess.DEVNULL,
)
except subprocess.CalledProcessError as e:
raise SnapshotMismatchError(f"Path {path} cannot be imported: {e}") from None
def is_package_dir(path: Path) -> bool:
"""
Check whether `path` contains a service package.
"""
if not path.is_dir():
return False
if path.name.endswith(".egg-info"):
return False
if (path / "__init__.pyi").exists():
return True
return False
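# A service package directory is expected to look roughly like this
# (assumed layout, inferred from the checks above and from main() below):
#   mypy_boto3_s3_package/
#       mypy_boto3_s3/
#           __init__.pyi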
def check_snapshot(path: Path) -> None:
"""
    Check a package snapshot with type checkers and runtime checks.
Raises:
SnapshotMismatchError -- If snapshot is not equal to current output.
"""
logger = logging.getLogger(LOGGER_NAME)
logger.debug(f"Running flake8 for {path.name} ...")
run_flake8(path)
logger.debug(f"Running mypy for {path.name} ...")
run_mypy(path)
logger.debug(f"Running pyright for {path.name} ...")
run_pyright(path)
if (path / "__main__.py").exists():
logger.debug(f"Running call for {path.name} ...")
run_call(path)
logger.debug(f"Running import for {path.name} ...")
run_import(path)
def find_package_path(path: Path) -> Optional[Path]:
"""
Find package directory inside `path`.
"""
for package_path in path.iterdir():
if is_package_dir(package_path):
return package_path
def main() -> None:
"""
Run main logic.
"""
args = parse_args()
logger = setup_logging(logging.DEBUG if args.debug else logging.INFO)
has_errors = False
for folder in sorted(args.path.iterdir()):
if not folder.name.endswith("_package"):
continue
if args.filter and not any(s in folder.as_posix() for s in args.filter):
continue
package_path = find_package_path(folder)
if not package_path:
continue
logger.info(f"Checking {folder.name}/{package_path.name} ...")
try:
check_snapshot(package_path)
except SnapshotMismatchError as e:
logger.error(e)
has_errors = True
if args.exit_on_error:
break
if has_errors:
sys.exit(1)
if __name__ == "__main__":
main()
import json
import datetime
import string
import random
import logging
import jwt
from main import db
from main.config import config
def execute_sql_from_file(filename):
    # Open and read the file as a single buffer
    with open(filename, 'r') as fd:
        sql_file = fd.read()
    # All SQL commands (split on ';')
    sql_commands = sql_file.split(';')
    # Execute every command from the input file
    for command in sql_commands:
        # This will skip and report validation errors.
        # For example, if the tables do not yet exist, this will skip over
        # the DROP TABLE commands.
        try:
            db.session.execute(command)
        except Exception as e:
            logging.exception(e)
def create_mock_data():
execute_sql_from_file('./sql/test.sql')
def drop_tables():
execute_sql_from_file('./sql/drop_tables.sql')
def create_headers(access_token=None):
headers = {
'Content-Type': 'application/json'
}
if access_token:
headers.update({
'Authorization': 'Bearer {}'.format(access_token)
})
return headers
def json_response(response):
return json.loads(response.data.decode('utf-8'))
def generate_access_token(user_id, is_expired=False):
"""
Generate JWT Token for test authentication.
:param user_id: User ID
:param is_expired: To generate expired tokens
:return: JWT Token string
"""
    iat = datetime.datetime.utcnow()
    exp = (iat + datetime.timedelta(hours=1)  # expires in one hour
           if not is_expired
           else iat - datetime.timedelta(minutes=5))  # already expired
    return jwt.encode({
        'sub': user_id,  # Subject of this token
        'iat': iat,  # Issued at
        'exp': exp,  # Expiry time
    }, config.SECRET_KEY)
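# A minimal usage sketch (hypothetical user id; assumes PyJWT, whose default
# signing algorithm for jwt.encode is HS256):
#   token = generate_access_token(42)
#   payload = jwt.decode(token, config.SECRET_KEY, algorithms=['HS256'])
#   assert payload['sub'] == 42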
def random_string(string_length=10):
"""Generate a random string of fixed length"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(string_length))
#
# @lc app=leetcode.cn id=909 lang=python3
#
# [909] Snakes and Ladders
#
# @lc code=start
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
N = len(board)
def get_pos(num):
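            # Cells are numbered 1..N*N starting at the bottom-left corner,
            # Boustrophedon style: the bottom row runs left-to-right, the
            # next row right-to-left, and so on. Worked example for N = 6:
            # num = 20 -> r = 19 // 6 = 3 rows from the bottom, c = 19 % 6 = 1;
            # row r+1 = 4 is even, so that row runs right-to-left and
            # c = 5 - 1 = 4; the top-down row index is 5 - 3 = 2, so cell 20
            # is board[2][4].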
r = (num-1) // N
c = (num-1) % N
c = c if ((r+1) & 1) else (N-1 - c)
r = N-1 - r
return r, c
# r, c = get_pos(20)
# print(r, c)
def skip(num):
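            # A value other than -1 marks the head of a snake or the base of
            # a ladder; the move must jump to that destination, and
            # destinations are not chained within a single move.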
r, c = get_pos(num)
if board[r][c] != -1:
return board[r][c]
else:
return num
from collections import deque
dq = deque([1])
vis = set([1])
step = -1
while dq:
sz = len(dq)
step += 1
for _ in range(sz):
node = dq.popleft()
if (node == N*N):
return step
for i in range(1, 7):
new_node = node + i
if (new_node > N*N):
continue
new_node = skip(new_node)
if (new_node not in vis):
dq.append(new_node)
vis.add(new_node)
return -1
""" 21-06-27 每日一题打卡BFS
Accepted
211/211 cases passed (100 ms)
Your runtime beats 99.08 % of python3 submissions
Your memory usage beats 14.68 % of python3 submissions (15.1 MB)
"""
# board = [[-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,35,-1,-1,13,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,15,-1,-1,-1,-1]]
# s = Solution().snakesAndLadders(board)
# print(s)
# @lc code=end
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.figure()
plt.xlabel('Time (ms)', fontsize=30)
plt.ylabel('Capture rate (%)', fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.xlim(x_lower_limit, x_upper_limit)
plt.ylim(y_lower_limit, y_upper_limit)
plt.plot(show_time, show_eff, 'b-', markeredgecolor='b', linewidth=5)
plt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches=
'tight')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = np.loadtxt('eff-proton.dat')
show_time = data[0]
show_eff = data[1]
x_lower_limit = 0.0
x_upper_limit = para.T_nu * 1000
y_lower_limit = min(show_eff) - abs(max(show_eff) - min(show_eff))
y_upper_limit = max(show_eff)
plt.figure()
plt.xlabel('Time (ms)', fontsize=30)
plt.ylabel('Capture rate (%)', fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.xlim(x_lower_limit, x_upper_limit)
plt.ylim(y_lower_limit, y_upper_limit)
plt.plot(show_time, show_eff, 'b-', markeredgecolor='b', linewidth=5)
plt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches=
'tight')
plt.show()
<|reserved_special_token_1|>
from math import *
import numpy as np
import matplotlib.pyplot as plt
import Input as para
data = np.loadtxt('eff-proton.dat')
show_time = data[0]
show_eff = data[1]
x_lower_limit = 0.0
x_upper_limit = para.T_nu * 1000
y_lower_limit = min(show_eff) - abs(max(show_eff) - min(show_eff))
y_upper_limit = max(show_eff)
plt.figure()
plt.xlabel('Time (ms)', fontsize=30)
plt.ylabel('Capture rate (%)', fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.xlim(x_lower_limit, x_upper_limit)
plt.ylim(y_lower_limit, y_upper_limit)
plt.plot(show_time, show_eff, 'b-', markeredgecolor='b', linewidth=5)
plt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches=
'tight')
plt.show()
<|reserved_special_token_1|>
#!/usr/bin/env python
from math import *
import numpy as np
import matplotlib.pyplot as plt
import Input as para
data = np.loadtxt("eff-proton.dat")
#data = np.loadtxt("eff-electron.dat")
show_time = data[0]
show_eff = data[1]
#print show_turn, show_eff
#x_lower_limit = min(show_time)
#x_upper_limit = max(show_time)
x_lower_limit = 0.0
x_upper_limit = para.T_nu*1000
y_lower_limit = min(show_eff)-abs(max(show_eff)-min(show_eff))
y_upper_limit = max(show_eff)
plt.figure()
plt.xlabel('Time (ms)', fontsize=30)
plt.ylabel('Capture rate (%)', fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.xlim(x_lower_limit, x_upper_limit)
plt.ylim(y_lower_limit, y_upper_limit)
plt.plot(show_time, show_eff, 'b-', markeredgecolor = 'b', linewidth=5)
plt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches='tight')
#plt.savefig('eff-vs-time-electron.eps', format='eps', dpi=1000, bbox_inches='tight')
plt.show()
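# A minimal sketch (kept as comments so the script above is unchanged) for
# producing test inputs: "eff-proton.dat" must hold two rows (time in ms,
# capture rate in %), and Input.py must define T_nu; the values below are
# made up purely for illustration.
#
#   import numpy as np
#   t_ms = np.linspace(0.0, 20.0, 200)                 # 0..20 ms
#   eff = 100.0 * (1.0 - np.exp(-t_ms / 5.0))          # hypothetical capture rate
#   np.savetxt('eff-proton.dat', np.vstack([t_ms, eff]))
#
#   # Input.py stub:
#   # T_nu = 0.02   # seconds; the plot's x-limit is T_nu * 1000 ms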
|
flexible
|
{
"blob_id": "bee96e817dd4d9462c1e3f8eb525c22c2117140a",
"index": 9942,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.figure()\nplt.xlabel('Time (ms)', fontsize=30)\nplt.ylabel('Capture rate (%)', fontsize=30)\nplt.xticks(fontsize=25)\nplt.yticks(fontsize=25)\nplt.xlim(x_lower_limit, x_upper_limit)\nplt.ylim(y_lower_limit, y_upper_limit)\nplt.plot(show_time, show_eff, 'b-', markeredgecolor='b', linewidth=5)\nplt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches=\n 'tight')\nplt.show()\n",
"step-3": "<mask token>\ndata = np.loadtxt('eff-proton.dat')\nshow_time = data[0]\nshow_eff = data[1]\nx_lower_limit = 0.0\nx_upper_limit = para.T_nu * 1000\ny_lower_limit = min(show_eff) - abs(max(show_eff) - min(show_eff))\ny_upper_limit = max(show_eff)\nplt.figure()\nplt.xlabel('Time (ms)', fontsize=30)\nplt.ylabel('Capture rate (%)', fontsize=30)\nplt.xticks(fontsize=25)\nplt.yticks(fontsize=25)\nplt.xlim(x_lower_limit, x_upper_limit)\nplt.ylim(y_lower_limit, y_upper_limit)\nplt.plot(show_time, show_eff, 'b-', markeredgecolor='b', linewidth=5)\nplt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches=\n 'tight')\nplt.show()\n",
"step-4": "from math import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport Input as para\ndata = np.loadtxt('eff-proton.dat')\nshow_time = data[0]\nshow_eff = data[1]\nx_lower_limit = 0.0\nx_upper_limit = para.T_nu * 1000\ny_lower_limit = min(show_eff) - abs(max(show_eff) - min(show_eff))\ny_upper_limit = max(show_eff)\nplt.figure()\nplt.xlabel('Time (ms)', fontsize=30)\nplt.ylabel('Capture rate (%)', fontsize=30)\nplt.xticks(fontsize=25)\nplt.yticks(fontsize=25)\nplt.xlim(x_lower_limit, x_upper_limit)\nplt.ylim(y_lower_limit, y_upper_limit)\nplt.plot(show_time, show_eff, 'b-', markeredgecolor='b', linewidth=5)\nplt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches=\n 'tight')\nplt.show()\n",
"step-5": "#!/usr/bin/env python\nfrom math import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport Input as para\n\ndata = np.loadtxt(\"eff-proton.dat\")\n#data = np.loadtxt(\"eff-electron.dat\")\n\nshow_time = data[0]\nshow_eff = data[1]\n#print show_turn, show_eff\n\n#x_lower_limit = min(show_time)\n#x_upper_limit = max(show_time)\nx_lower_limit = 0.0\nx_upper_limit = para.T_nu*1000\n\ny_lower_limit = min(show_eff)-abs(max(show_eff)-min(show_eff))\ny_upper_limit = max(show_eff)\n\nplt.figure()\nplt.xlabel('Time (ms)', fontsize=30)\nplt.ylabel('Capture rate (%)', fontsize=30)\nplt.xticks(fontsize=25)\nplt.yticks(fontsize=25)\nplt.xlim(x_lower_limit, x_upper_limit)\nplt.ylim(y_lower_limit, y_upper_limit)\nplt.plot(show_time, show_eff, 'b-', markeredgecolor = 'b', linewidth=5)\n\nplt.savefig('eff-vs-time-proton.eps', format='eps', dpi=1000, bbox_inches='tight')\n#plt.savefig('eff-vs-time-electron.eps', format='eps', dpi=1000, bbox_inches='tight')\n\nplt.show()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ReportGenerator(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run(self, html, url):
print('[#] Running the report generator')
self.html = html
self.getting_flags_locations()
self.getting_related_flags(url)
<|reserved_special_token_0|>
def getting_related_flags(self, url):
banner = self.get_headers(url)
if banner:
csp_dict = banner[0]
headers = banner[1]
frame = self.raise_frame_option(csp_dict, headers)
            protocol = self.raise_unsafe_protocol(csp_dict, url)
trusted = self.raise_trusted_types(csp_dict)
print(frame)
print(protocol)
print(trusted)
print(csp_dict)
def get_headers(self, url):
req = requests.get(url)
try:
csp_header = req.headers['Content-Security-Policy']
csp_dict = self.extracting_csp_dict(csp_header)
return csp_dict, req.headers
except KeyError:
print('No CSP on this site')
@staticmethod
def extracting_csp_dict(header_list):
res = {}
header_list = header_list.split(';')
        for i in range(len(header_list)):
header_list[i] = header_list[i].strip()
sources = header_list[i].split(' ')
res[sources[0]] = sources[1:]
return res
def generating_csp_flags(self, csp_dict):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def lower_case_in(elem, dic):
return elem.lower() in [x.lower() for x in dic.keys()]
def pretty_print_report(self):
print('*******************************************')
print('*********** REPORT FOR THE PAGE ***********')
print('*******************************************')
if self.flags:
for flag in self.flags:
print('---------------------------------------------')
print('>>> FLAGS RAISED <<<')
print('>>> At location : ', flag.location)
print('>>> Type : ', flag.id)
print('>>> Explanation : ', flag.reco_dict[flag.id][
'explanation'])
if flag.content != '':
print('>>> Content : ', flag.content)
else:
print('>>> Content : one liner tag')
print('---------------------------------------------')
print('*******************************************')
else:
print('No flags have been raised for that specific page')
print('*******************************************')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReportGenerator(object):
<|reserved_special_token_0|>
def __init__(self):
self.flags = list()
self.related_flags = list()
self.line_finder = LineFinder(self.flags)
self.html = ''
def run(self, html, url):
print('[#] Running the report generator')
self.html = html
self.getting_flags_locations()
self.getting_related_flags(url)
<|reserved_special_token_0|>
def getting_related_flags(self, url):
banner = self.get_headers(url)
if banner:
csp_dict = banner[0]
headers = banner[1]
frame = self.raise_frame_option(csp_dict, headers)
            protocol = self.raise_unsafe_protocol(csp_dict, url)
trusted = self.raise_trusted_types(csp_dict)
print(frame)
print(protocol)
print(trusted)
print(csp_dict)
def get_headers(self, url):
req = requests.get(url)
try:
csp_header = req.headers['Content-Security-Policy']
csp_dict = self.extracting_csp_dict(csp_header)
return csp_dict, req.headers
except KeyError:
print('No CSP on this site')
@staticmethod
def extracting_csp_dict(header_list):
res = {}
header_list = header_list.split(';')
        for i in range(len(header_list)):
header_list[i] = header_list[i].strip()
sources = header_list[i].split(' ')
res[sources[0]] = sources[1:]
return res
def generating_csp_flags(self, csp_dict):
pass
def raise_unsafe_protocol(self, csp_dict, url):
if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url
).scheme == 'https':
for directive in csp_dict:
for source in csp_dict[directive][1:]:
if source == 'http':
return Flag('possible_mixed_content')
elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):
return Flag('no_upgrade_insecure_requests')
return None
def raise_frame_option(self, csp_dict, header):
try:
            # frame-ancestors maps to a list of sources; strip the CSP quotes
            sources = [s.strip("'").lower() for s in csp_dict['frame-ancestors']]
            if not set(sources) <= {'none', 'self'}:
flag_id = 'permissive_frame_rule'
return Flag(flag_id)
except KeyError:
pass
        if not self.lower_case_in('X-Frame-Options', header):
flag_id = 'no_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allowall'):
flag_id = 'permissive_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allow-from'):
flag_id = 'permissive_frame_rule'
else:
flag_id = 'missing_frame_ancestors'
return Flag(flag_id)
def raise_trusted_types(self, csp_dict):
        if not self.lower_case_in('trusted-types', csp_dict):
return Flag('no_trusted_types')
return None
def raise_missing_object(self, csp_dict):
        default_src = [s.strip("'").lower() for s in csp_dict.get('default-src', [])]
        if not self.lower_case_in('object-src', csp_dict) and 'none' not in default_src:
return Flag('missing_obj_src')
return None
@staticmethod
def lower_case_in(elem, dic):
return elem.lower() in [x.lower() for x in dic.keys()]
def pretty_print_report(self):
print('*******************************************')
print('*********** REPORT FOR THE PAGE ***********')
print('*******************************************')
if self.flags:
for flag in self.flags:
print('---------------------------------------------')
print('>>> FLAGS RAISED <<<')
print('>>> At location : ', flag.location)
print('>>> Type : ', flag.id)
print('>>> Explanation : ', flag.reco_dict[flag.id][
'explanation'])
if flag.content != '':
print('>>> Content : ', flag.content)
else:
print('>>> Content : one liner tag')
print('---------------------------------------------')
print('*******************************************')
else:
print('No flags have been raised for that specific page')
print('*******************************************')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReportGenerator(object):
<|reserved_special_token_0|>
def __init__(self):
self.flags = list()
self.related_flags = list()
self.line_finder = LineFinder(self.flags)
self.html = ''
def run(self, html, url):
print('[#] Running the report generator')
self.html = html
self.getting_flags_locations()
self.getting_related_flags(url)
def getting_flags_locations(self):
"""
Locates the flags in the resource
        Calls the LineFinder class in order to locate each flag's line in the HTML
:return: None
"""
print(self.flags)
self.line_finder.find_line(self.html)
def getting_related_flags(self, url):
banner = self.get_headers(url)
if banner:
csp_dict = banner[0]
headers = banner[1]
frame = self.raise_frame_option(csp_dict, headers)
            protocol = self.raise_unsafe_protocol(csp_dict, url)
trusted = self.raise_trusted_types(csp_dict)
print(frame)
print(protocol)
print(trusted)
print(csp_dict)
def get_headers(self, url):
req = requests.get(url)
try:
csp_header = req.headers['Content-Security-Policy']
csp_dict = self.extracting_csp_dict(csp_header)
return csp_dict, req.headers
except KeyError:
print('No CSP on this site')
@staticmethod
def extracting_csp_dict(header_list):
res = {}
header_list = header_list.split(';')
        for i in range(len(header_list)):
header_list[i] = header_list[i].strip()
sources = header_list[i].split(' ')
res[sources[0]] = sources[1:]
return res
def generating_csp_flags(self, csp_dict):
pass
def raise_unsafe_protocol(self, csp_dict, url):
if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url
).scheme == 'https':
for directive in csp_dict:
for source in csp_dict[directive][1:]:
if source == 'http':
return Flag('possible_mixed_content')
elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):
return Flag('no_upgrade_insecure_requests')
return None
def raise_frame_option(self, csp_dict, header):
try:
            # frame-ancestors maps to a list of sources; strip the CSP quotes
            sources = [s.strip("'").lower() for s in csp_dict['frame-ancestors']]
            if not set(sources) <= {'none', 'self'}:
flag_id = 'permissive_frame_rule'
return Flag(flag_id)
except KeyError:
pass
        if not self.lower_case_in('X-Frame-Options', header):
flag_id = 'no_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allowall'):
flag_id = 'permissive_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allow-from'):
flag_id = 'permissive_frame_rule'
else:
flag_id = 'missing_frame_ancestors'
return Flag(flag_id)
def raise_trusted_types(self, csp_dict):
        if not self.lower_case_in('trusted-types', csp_dict):
return Flag('no_trusted_types')
return None
def raise_missing_object(self, csp_dict):
        default_src = [s.strip("'").lower() for s in csp_dict.get('default-src', [])]
        if not self.lower_case_in('object-src', csp_dict) and 'none' not in default_src:
return Flag('missing_obj_src')
return None
@staticmethod
def lower_case_in(elem, dic):
return elem.lower() in [x.lower() for x in dic.keys()]
def pretty_print_report(self):
print('*******************************************')
print('*********** REPORT FOR THE PAGE ***********')
print('*******************************************')
if self.flags:
for flag in self.flags:
print('---------------------------------------------')
print('>>> FLAGS RAISED <<<')
print('>>> At location : ', flag.location)
print('>>> Type : ', flag.id)
print('>>> Explanation : ', flag.reco_dict[flag.id][
'explanation'])
if flag.content != '':
print('>>> Content : ', flag.content)
else:
print('>>> Content : one liner tag')
print('---------------------------------------------')
print('*******************************************')
else:
print('No flags have been raised for that specific page')
print('*******************************************')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReportGenerator(object):
"""
    Class designed to generate reports after a CSP audit
    The ReportGenerator class generates a report based on a list of flags
    issued by the sorter classes, then aggregates the data into an
    exploitable format.
"""
def __init__(self):
self.flags = list()
self.related_flags = list()
self.line_finder = LineFinder(self.flags)
self.html = ''
def run(self, html, url):
print('[#] Running the report generator')
self.html = html
self.getting_flags_locations()
self.getting_related_flags(url)
def getting_flags_locations(self):
"""
Locates the flags in the resource
        Calls the LineFinder class in order to locate each flag's line in the HTML
:return: None
"""
print(self.flags)
self.line_finder.find_line(self.html)
def getting_related_flags(self, url):
banner = self.get_headers(url)
if banner:
csp_dict = banner[0]
headers = banner[1]
frame = self.raise_frame_option(csp_dict, headers)
            protocol = self.raise_unsafe_protocol(csp_dict, url)
trusted = self.raise_trusted_types(csp_dict)
print(frame)
print(protocol)
print(trusted)
print(csp_dict)
def get_headers(self, url):
req = requests.get(url)
try:
csp_header = req.headers['Content-Security-Policy']
csp_dict = self.extracting_csp_dict(csp_header)
return csp_dict, req.headers
except KeyError:
print('No CSP on this site')
@staticmethod
def extracting_csp_dict(header_list):
res = {}
header_list = header_list.split(';')
        for i in range(len(header_list)):
header_list[i] = header_list[i].strip()
sources = header_list[i].split(' ')
res[sources[0]] = sources[1:]
return res
def generating_csp_flags(self, csp_dict):
pass
def raise_unsafe_protocol(self, csp_dict, url):
if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url
).scheme == 'https':
for directive in csp_dict:
for source in csp_dict[directive][1:]:
if source == 'http':
return Flag('possible_mixed_content')
elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):
return Flag('no_upgrade_insecure_requests')
return None
def raise_frame_option(self, csp_dict, header):
try:
            # frame-ancestors maps to a list of sources; strip the CSP quotes
            sources = [s.strip("'").lower() for s in csp_dict['frame-ancestors']]
            if not set(sources) <= {'none', 'self'}:
flag_id = 'permissive_frame_rule'
return Flag(flag_id)
except KeyError:
pass
        if not self.lower_case_in('X-Frame-Options', header):
flag_id = 'no_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allowall'):
flag_id = 'permissive_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allow-from'):
flag_id = 'permissive_frame_rule'
else:
flag_id = 'missing_frame_ancestors'
return Flag(flag_id)
def raise_trusted_types(self, csp_dict):
        if not self.lower_case_in('trusted-types', csp_dict):
return Flag('no_trusted_types')
return None
def raise_missing_object(self, csp_dict):
        default_src = [s.strip("'").lower() for s in csp_dict.get('default-src', [])]
        if not self.lower_case_in('object-src', csp_dict) and 'none' not in default_src:
return Flag('missing_obj_src')
return None
@staticmethod
def lower_case_in(elem, dic):
return elem.lower() in [x.lower() for x in dic.keys()]
def pretty_print_report(self):
print('*******************************************')
print('*********** REPORT FOR THE PAGE ***********')
print('*******************************************')
if self.flags:
for flag in self.flags:
print('---------------------------------------------')
print('>>> FLAGS RAISED <<<')
print('>>> At location : ', flag.location)
print('>>> Type : ', flag.id)
print('>>> Explanation : ', flag.reco_dict[flag.id][
'explanation'])
if flag.content != '':
print('>>> Content : ', flag.content)
else:
print('>>> Content : one liner tag')
print('---------------------------------------------')
print('*******************************************')
else:
print('No flags have been raised for that specific page')
print('*******************************************')
<|reserved_special_token_1|>
""" Class implementing ReportGenerator """
from urllib.parse import urlparse
import requests
from src.classes.reporter.flag import Flag
from src.classes.reporter.line_finder import LineFinder
class ReportGenerator(object):
"""
    Class designed to generate reports after a CSP audit
    The ReportGenerator class generates a report based on a list of flags
    issued by the sorter classes, then aggregates the data into an
    exploitable format.
"""
def __init__(self):
# List of flags issued by the sorters
self.flags = list()
# List of related flags
self.related_flags = list()
# Initiating line parser
self.line_finder = LineFinder(self.flags)
        # Initialise html to an empty string
self.html = ''
def run(self, html, url):
# Calling the run method to generate the report
print('[#] Running the report generator')
# Setting the html page to inspect
self.html = html
# Getting the flag location
self.getting_flags_locations()
# Generating the related flags
self.getting_related_flags(url)
def getting_flags_locations(self):
"""
Locates the flags in the resource
        Calls the LineFinder class in order to locate each flag's line in the HTML
:return: None
"""
print(self.flags)
self.line_finder.find_line(self.html)
def getting_related_flags(self, url):
banner = self.get_headers(url)
if banner:
csp_dict = banner[0]
headers = banner[1]
frame = self.raise_frame_option(csp_dict, headers)
            protocol = self.raise_unsafe_protocol(csp_dict, url)
trusted = self.raise_trusted_types(csp_dict)
print(frame)
print(protocol)
print(trusted)
print(csp_dict)
def get_headers(self, url):
req = requests.get(url)
try:
csp_header = req.headers['Content-Security-Policy']
csp_dict = self.extracting_csp_dict(csp_header)
return csp_dict, req.headers
except KeyError:
print('No CSP on this site')
@staticmethod
def extracting_csp_dict(header_list):
res = {}
header_list = header_list.split(';')
        for i in range(len(header_list)):
header_list[i] = header_list[i].strip()
sources = header_list[i].split(' ')
res[sources[0]] = sources[1:]
return res
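    # Worked example (hypothetical header value):
    #   extracting_csp_dict("default-src 'self'; img-src 'self' data:")
    #   -> {'default-src': ["'self'"], 'img-src': ["'self'", 'data:']}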
def generating_csp_flags(self, csp_dict):
pass
def raise_unsafe_protocol(self, csp_dict, url):
if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url).scheme == 'https':
for directive in csp_dict:
for source in csp_dict[directive][1:]:
if source == 'http':
return Flag('possible_mixed_content')
elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):
return Flag('no_upgrade_insecure_requests')
return None
def raise_frame_option(self, csp_dict, header):
try:
            # frame-ancestors maps to a list of sources; strip the CSP quotes
            sources = [s.strip("'").lower() for s in csp_dict['frame-ancestors']]
            if not set(sources) <= {'none', 'self'}:
flag_id = 'permissive_frame_rule'
return Flag(flag_id)
except KeyError:
pass
        if not self.lower_case_in('X-Frame-Options', header):
flag_id = 'no_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allowall'):
flag_id = 'permissive_frame_rule'
elif header['X-Frame-Options'].lower().startswith('allow-from'):
flag_id = 'permissive_frame_rule'
else:
flag_id = 'missing_frame_ancestors'
return Flag(flag_id)
def raise_trusted_types(self, csp_dict):
        if not self.lower_case_in('trusted-types', csp_dict):
return Flag('no_trusted_types')
return None
def raise_missing_object(self, csp_dict):
        default_src = [s.strip("'").lower() for s in csp_dict.get('default-src', [])]
        if not self.lower_case_in('object-src', csp_dict) and 'none' not in default_src:
return Flag('missing_obj_src')
return None
@staticmethod
def lower_case_in(elem, dic):
return elem.lower() in [x.lower() for x in dic.keys()]
def pretty_print_report(self):
print('*******************************************')
print('*********** REPORT FOR THE PAGE ***********')
print('*******************************************')
if self.flags:
for flag in self.flags:
print('---------------------------------------------')
print('>>> FLAGS RAISED <<<')
print('>>> At location : ', flag.location)
print('>>> Type : ', flag.id)
print('>>> Explanation : ', flag.reco_dict[flag.id]['explanation'])
if flag.content != '':
print('>>> Content : ', flag.content)
else:
print('>>> Content : one liner tag')
print('---------------------------------------------')
print('*******************************************')
else:
print('No flags have been raised for that specific page')
print('*******************************************')
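# A minimal usage sketch, assuming the imports above resolve and the target
# page (a hypothetical URL) is reachable. In the full project the sorter
# classes populate self.flags before run() is called, so the flag list will
# be empty when the class is driven stand-alone like this.
if __name__ == '__main__':
    reporter = ReportGenerator()
    page = requests.get('https://example.com')
    reporter.run(page.text, 'https://example.com')
    reporter.pretty_print_report()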
|
flexible
|
{
"blob_id": "2003060f7793de678b4a259ad9424cd5927a57f7",
"index": 3167,
"step-1": "<mask token>\n\n\nclass ReportGenerator(object):\n <mask token>\n <mask token>\n\n def run(self, html, url):\n print('[#] Running the report generator')\n self.html = html\n self.getting_flags_locations()\n self.getting_related_flags(url)\n <mask token>\n\n def getting_related_flags(self, url):\n banner = self.get_headers(url)\n if banner:\n csp_dict = banner[0]\n headers = banner[1]\n frame = self.raise_frame_option(csp_dict, headers)\n protocol = self.raise_unsafe_protocol(csp_dict, headers)\n trusted = self.raise_trusted_types(csp_dict)\n print(frame)\n print(protocol)\n print(trusted)\n print(csp_dict)\n\n def get_headers(self, url):\n req = requests.get(url)\n try:\n csp_header = req.headers['Content-Security-Policy']\n csp_dict = self.extracting_csp_dict(csp_header)\n return csp_dict, req.headers\n except KeyError:\n print('No CSP on this site')\n\n @staticmethod\n def extracting_csp_dict(header_list):\n res = {}\n header_list = header_list.split(';')\n for i in enumerate(header_list):\n header_list[i] = header_list[i].strip()\n sources = header_list[i].split(' ')\n res[sources[0]] = sources[1:]\n return res\n\n def generating_csp_flags(self, csp_dict):\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def lower_case_in(elem, dic):\n return elem.lower() in [x.lower() for x in dic.keys()]\n\n def pretty_print_report(self):\n print('*******************************************')\n print('*********** REPORT FOR THE PAGE ***********')\n print('*******************************************')\n if self.flags:\n for flag in self.flags:\n print('---------------------------------------------')\n print('>>> FLAGS RAISED <<<')\n print('>>> At location : ', flag.location)\n print('>>> Type : ', flag.id)\n print('>>> Explanation : ', flag.reco_dict[flag.id][\n 'explanation'])\n if flag.content != '':\n print('>>> Content : ', flag.content)\n else:\n print('>>> Content : one liner tag')\n print('---------------------------------------------')\n print('*******************************************')\n else:\n print('No flags have been raised for that specific page')\n print('*******************************************')\n",
"step-2": "<mask token>\n\n\nclass ReportGenerator(object):\n <mask token>\n\n def __init__(self):\n self.flags = list()\n self.related_flags = list()\n self.line_finder = LineFinder(self.flags)\n self.html = ''\n\n def run(self, html, url):\n print('[#] Running the report generator')\n self.html = html\n self.getting_flags_locations()\n self.getting_related_flags(url)\n <mask token>\n\n def getting_related_flags(self, url):\n banner = self.get_headers(url)\n if banner:\n csp_dict = banner[0]\n headers = banner[1]\n frame = self.raise_frame_option(csp_dict, headers)\n protocol = self.raise_unsafe_protocol(csp_dict, headers)\n trusted = self.raise_trusted_types(csp_dict)\n print(frame)\n print(protocol)\n print(trusted)\n print(csp_dict)\n\n def get_headers(self, url):\n req = requests.get(url)\n try:\n csp_header = req.headers['Content-Security-Policy']\n csp_dict = self.extracting_csp_dict(csp_header)\n return csp_dict, req.headers\n except KeyError:\n print('No CSP on this site')\n\n @staticmethod\n def extracting_csp_dict(header_list):\n res = {}\n header_list = header_list.split(';')\n for i in enumerate(header_list):\n header_list[i] = header_list[i].strip()\n sources = header_list[i].split(' ')\n res[sources[0]] = sources[1:]\n return res\n\n def generating_csp_flags(self, csp_dict):\n pass\n\n def raise_unsafe_protocol(self, csp_dict, url):\n if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url\n ).scheme == 'https':\n for directive in csp_dict:\n for source in csp_dict[directive][1:]:\n if source == 'http':\n return Flag('possible_mixed_content')\n elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):\n return Flag('no_upgrade_insecure_requests')\n return None\n\n def raise_frame_option(self, csp_dict, header):\n try:\n if csp_dict['frame-ancestor'].lower() not in ['none', 'self']:\n flag_id = 'permissive_frame_rule'\n return Flag(flag_id)\n except KeyError:\n pass\n if not self.lower_case_in('X-Frame-Options', csp_dict):\n flag_id = 'no_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allowall'):\n flag_id = 'permissive_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allow-from'):\n flag_id = 'permissive_frame_rule'\n else:\n flag_id = 'missing_frame_ancestors'\n return Flag(flag_id)\n\n def raise_trusted_types(self, csp_dict):\n if not self.lower_case_in('trusted_types', csp_dict):\n return Flag('no_trusted_types')\n return None\n\n def raise_missing_object(self, csp_dict):\n if not self.lower_case_in('object-src', csp_dict) and csp_dict[\n 'default-src'] != 'none':\n return Flag('missing_obj_src')\n return None\n\n @staticmethod\n def lower_case_in(elem, dic):\n return elem.lower() in [x.lower() for x in dic.keys()]\n\n def pretty_print_report(self):\n print('*******************************************')\n print('*********** REPORT FOR THE PAGE ***********')\n print('*******************************************')\n if self.flags:\n for flag in self.flags:\n print('---------------------------------------------')\n print('>>> FLAGS RAISED <<<')\n print('>>> At location : ', flag.location)\n print('>>> Type : ', flag.id)\n print('>>> Explanation : ', flag.reco_dict[flag.id][\n 'explanation'])\n if flag.content != '':\n print('>>> Content : ', flag.content)\n else:\n print('>>> Content : one liner tag')\n print('---------------------------------------------')\n print('*******************************************')\n else:\n print('No flags have been raised for that specific page')\n 
print('*******************************************')\n",
"step-3": "<mask token>\n\n\nclass ReportGenerator(object):\n <mask token>\n\n def __init__(self):\n self.flags = list()\n self.related_flags = list()\n self.line_finder = LineFinder(self.flags)\n self.html = ''\n\n def run(self, html, url):\n print('[#] Running the report generator')\n self.html = html\n self.getting_flags_locations()\n self.getting_related_flags(url)\n\n def getting_flags_locations(self):\n \"\"\"\n Locates the flags in the resource\n\n Calls the LineFinder class in order\n\n :return: None\n \"\"\"\n print(self.flags)\n self.line_finder.find_line(self.html)\n\n def getting_related_flags(self, url):\n banner = self.get_headers(url)\n if banner:\n csp_dict = banner[0]\n headers = banner[1]\n frame = self.raise_frame_option(csp_dict, headers)\n protocol = self.raise_unsafe_protocol(csp_dict, headers)\n trusted = self.raise_trusted_types(csp_dict)\n print(frame)\n print(protocol)\n print(trusted)\n print(csp_dict)\n\n def get_headers(self, url):\n req = requests.get(url)\n try:\n csp_header = req.headers['Content-Security-Policy']\n csp_dict = self.extracting_csp_dict(csp_header)\n return csp_dict, req.headers\n except KeyError:\n print('No CSP on this site')\n\n @staticmethod\n def extracting_csp_dict(header_list):\n res = {}\n header_list = header_list.split(';')\n for i in enumerate(header_list):\n header_list[i] = header_list[i].strip()\n sources = header_list[i].split(' ')\n res[sources[0]] = sources[1:]\n return res\n\n def generating_csp_flags(self, csp_dict):\n pass\n\n def raise_unsafe_protocol(self, csp_dict, url):\n if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url\n ).scheme == 'https':\n for directive in csp_dict:\n for source in csp_dict[directive][1:]:\n if source == 'http':\n return Flag('possible_mixed_content')\n elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):\n return Flag('no_upgrade_insecure_requests')\n return None\n\n def raise_frame_option(self, csp_dict, header):\n try:\n if csp_dict['frame-ancestor'].lower() not in ['none', 'self']:\n flag_id = 'permissive_frame_rule'\n return Flag(flag_id)\n except KeyError:\n pass\n if not self.lower_case_in('X-Frame-Options', csp_dict):\n flag_id = 'no_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allowall'):\n flag_id = 'permissive_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allow-from'):\n flag_id = 'permissive_frame_rule'\n else:\n flag_id = 'missing_frame_ancestors'\n return Flag(flag_id)\n\n def raise_trusted_types(self, csp_dict):\n if not self.lower_case_in('trusted_types', csp_dict):\n return Flag('no_trusted_types')\n return None\n\n def raise_missing_object(self, csp_dict):\n if not self.lower_case_in('object-src', csp_dict) and csp_dict[\n 'default-src'] != 'none':\n return Flag('missing_obj_src')\n return None\n\n @staticmethod\n def lower_case_in(elem, dic):\n return elem.lower() in [x.lower() for x in dic.keys()]\n\n def pretty_print_report(self):\n print('*******************************************')\n print('*********** REPORT FOR THE PAGE ***********')\n print('*******************************************')\n if self.flags:\n for flag in self.flags:\n print('---------------------------------------------')\n print('>>> FLAGS RAISED <<<')\n print('>>> At location : ', flag.location)\n print('>>> Type : ', flag.id)\n print('>>> Explanation : ', flag.reco_dict[flag.id][\n 'explanation'])\n if flag.content != '':\n print('>>> Content : ', flag.content)\n else:\n print('>>> Content : one liner tag')\n 
print('---------------------------------------------')\n print('*******************************************')\n else:\n print('No flags have been raised for that specific page')\n print('*******************************************')\n",
"step-4": "<mask token>\n\n\nclass ReportGenerator(object):\n \"\"\"\n Class designed to generate reports after CSP audition\n\n The ReportGenerator class generated report based on a list of flags issued\n by the sorter classes. Then it aggregates the data into an exploitable\n format.\n \"\"\"\n\n def __init__(self):\n self.flags = list()\n self.related_flags = list()\n self.line_finder = LineFinder(self.flags)\n self.html = ''\n\n def run(self, html, url):\n print('[#] Running the report generator')\n self.html = html\n self.getting_flags_locations()\n self.getting_related_flags(url)\n\n def getting_flags_locations(self):\n \"\"\"\n Locates the flags in the resource\n\n Calls the LineFinder class in order\n\n :return: None\n \"\"\"\n print(self.flags)\n self.line_finder.find_line(self.html)\n\n def getting_related_flags(self, url):\n banner = self.get_headers(url)\n if banner:\n csp_dict = banner[0]\n headers = banner[1]\n frame = self.raise_frame_option(csp_dict, headers)\n protocol = self.raise_unsafe_protocol(csp_dict, headers)\n trusted = self.raise_trusted_types(csp_dict)\n print(frame)\n print(protocol)\n print(trusted)\n print(csp_dict)\n\n def get_headers(self, url):\n req = requests.get(url)\n try:\n csp_header = req.headers['Content-Security-Policy']\n csp_dict = self.extracting_csp_dict(csp_header)\n return csp_dict, req.headers\n except KeyError:\n print('No CSP on this site')\n\n @staticmethod\n def extracting_csp_dict(header_list):\n res = {}\n header_list = header_list.split(';')\n for i in enumerate(header_list):\n header_list[i] = header_list[i].strip()\n sources = header_list[i].split(' ')\n res[sources[0]] = sources[1:]\n return res\n\n def generating_csp_flags(self, csp_dict):\n pass\n\n def raise_unsafe_protocol(self, csp_dict, url):\n if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url\n ).scheme == 'https':\n for directive in csp_dict:\n for source in csp_dict[directive][1:]:\n if source == 'http':\n return Flag('possible_mixed_content')\n elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):\n return Flag('no_upgrade_insecure_requests')\n return None\n\n def raise_frame_option(self, csp_dict, header):\n try:\n if csp_dict['frame-ancestor'].lower() not in ['none', 'self']:\n flag_id = 'permissive_frame_rule'\n return Flag(flag_id)\n except KeyError:\n pass\n if not self.lower_case_in('X-Frame-Options', csp_dict):\n flag_id = 'no_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allowall'):\n flag_id = 'permissive_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allow-from'):\n flag_id = 'permissive_frame_rule'\n else:\n flag_id = 'missing_frame_ancestors'\n return Flag(flag_id)\n\n def raise_trusted_types(self, csp_dict):\n if not self.lower_case_in('trusted_types', csp_dict):\n return Flag('no_trusted_types')\n return None\n\n def raise_missing_object(self, csp_dict):\n if not self.lower_case_in('object-src', csp_dict) and csp_dict[\n 'default-src'] != 'none':\n return Flag('missing_obj_src')\n return None\n\n @staticmethod\n def lower_case_in(elem, dic):\n return elem.lower() in [x.lower() for x in dic.keys()]\n\n def pretty_print_report(self):\n print('*******************************************')\n print('*********** REPORT FOR THE PAGE ***********')\n print('*******************************************')\n if self.flags:\n for flag in self.flags:\n print('---------------------------------------------')\n print('>>> FLAGS RAISED <<<')\n print('>>> At location : ', flag.location)\n print('>>> Type : ', 
flag.id)\n print('>>> Explanation : ', flag.reco_dict[flag.id][\n 'explanation'])\n if flag.content != '':\n print('>>> Content : ', flag.content)\n else:\n print('>>> Content : one liner tag')\n print('---------------------------------------------')\n print('*******************************************')\n else:\n print('No flags have been raised for that specific page')\n print('*******************************************')\n",
"step-5": "\"\"\" Class implementing ReportGenerator \"\"\"\n\nfrom urllib.parse import urlparse\nimport requests\nfrom src.classes.reporter.flag import Flag\nfrom src.classes.reporter.line_finder import LineFinder\n\n\nclass ReportGenerator(object):\n \"\"\"\n Class designed to generate reports after CSP audition\n\n The ReportGenerator class generated report based on a list of flags issued\n by the sorter classes. Then it aggregates the data into an exploitable\n format.\n \"\"\"\n\n def __init__(self):\n # List of flags issued by the sorters\n self.flags = list()\n # List of related flags\n self.related_flags = list()\n # Initiating line parser\n self.line_finder = LineFinder(self.flags)\n # Initiating html to empty styrin\n self.html = ''\n\n def run(self, html, url):\n # Calling the run method to generate the report\n print('[#] Running the report generator')\n # Setting the html page to inspect\n self.html = html\n # Getting the flag location\n self.getting_flags_locations()\n # Generating the related flags\n self.getting_related_flags(url)\n\n def getting_flags_locations(self):\n \"\"\"\n Locates the flags in the resource\n\n Calls the LineFinder class in order\n\n :return: None\n \"\"\"\n print(self.flags)\n self.line_finder.find_line(self.html)\n\n def getting_related_flags(self, url):\n banner = self.get_headers(url)\n if banner:\n csp_dict = banner[0]\n headers = banner[1]\n\n frame = self.raise_frame_option(csp_dict, headers)\n protocol = self.raise_unsafe_protocol(csp_dict, headers)\n trusted = self.raise_trusted_types(csp_dict)\n\n print(frame)\n print(protocol)\n print(trusted)\n print(csp_dict)\n\n def get_headers(self, url):\n req = requests.get(url)\n try:\n csp_header = req.headers['Content-Security-Policy']\n csp_dict = self.extracting_csp_dict(csp_header)\n return csp_dict, req.headers\n except KeyError:\n print('No CSP on this site')\n\n @staticmethod\n def extracting_csp_dict(header_list):\n res = {}\n header_list = header_list.split(';')\n for i in enumerate(header_list):\n header_list[i] = header_list[i].strip()\n sources = header_list[i].split(' ')\n res[sources[0]] = sources[1:]\n return res\n\n def generating_csp_flags(self, csp_dict):\n pass\n\n def raise_unsafe_protocol(self, csp_dict, url):\n if 'block-all-mixed-content' not in csp_dict.keys() and urlparse(url).scheme == 'https':\n for directive in csp_dict:\n for source in csp_dict[directive][1:]:\n if source == 'http':\n return Flag('possible_mixed_content')\n elif not self.lower_case_in('upgrade-insecure-requests', csp_dict):\n return Flag('no_upgrade_insecure_requests')\n return None\n\n def raise_frame_option(self, csp_dict, header):\n try:\n if csp_dict['frame-ancestor'].lower() not in ['none', 'self']:\n flag_id = 'permissive_frame_rule'\n return Flag(flag_id)\n except KeyError:\n pass\n if not self.lower_case_in('X-Frame-Options', csp_dict):\n flag_id = 'no_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allowall'):\n flag_id = 'permissive_frame_rule'\n elif header['X-Frame-Options'].lower().startswith('allow-from'):\n flag_id = 'permissive_frame_rule'\n else:\n flag_id = 'missing_frame_ancestors'\n return Flag(flag_id)\n\n def raise_trusted_types(self, csp_dict):\n if not self.lower_case_in('trusted_types', csp_dict):\n return Flag('no_trusted_types')\n return None\n\n def raise_missing_object(self, csp_dict):\n if not self.lower_case_in('object-src', csp_dict) and \\\n csp_dict['default-src'] != 'none':\n return Flag('missing_obj_src')\n return None\n\n @staticmethod\n def 
lower_case_in(elem, dic):\n return elem.lower() in [x.lower() for x in dic.keys()]\n\n def pretty_print_report(self):\n print('*******************************************')\n print('*********** REPORT FOR THE PAGE ***********')\n print('*******************************************')\n if self.flags:\n for flag in self.flags:\n print('---------------------------------------------')\n print('>>> FLAGS RAISED <<<')\n print('>>> At location : ', flag.location)\n print('>>> Type : ', flag.id)\n print('>>> Explanation : ', flag.reco_dict[flag.id]['explanation'])\n if flag.content != '':\n print('>>> Content : ', flag.content)\n else:\n print('>>> Content : one liner tag')\n print('---------------------------------------------')\n print('*******************************************')\n else:\n print('No flags have been raised for that specific page')\n print('*******************************************')\n",
"step-ids": [
8,
13,
14,
15,
17
]
}
|
[
8,
13,
14,
15,
17
] |
from LinkedList import LinkedList
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
h1 = l1
v1 = 0
while h1:
v1 = v1 * 10 + h1.val
h1 = h1.next
h2 = l2
v2 = 0
while h2:
v2 = v2 * 10 + h2.val
h2 = h2.next
val = str(v1 + v2)
dummy = curr = ListNode(0)
for i in val:
curr.next = ListNode(int(i))
curr = curr.next
return dummy.next
l11, l22 = [7, 2, 4, 3], [5, 6, 4]
l1 = LinkedList(l11).getHead()
l2 = LinkedList(l22).getHead()
sl = Solution()
head = sl.addTwoNumbers(l1, l2)
LinkedList([1]).printLinkedList(head)
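# The solution above converts each list to an integer first, which is simple
# but builds arbitrarily large ints. A common alternative adds digit by digit
# with a carry; a minimal sketch (same ListNode class as above, digits stored
# most-significant first as in the test data):
def add_two_numbers_carry(l1, l2):
    s1, s2 = [], []
    while l1:                        # push digits so we can pop from the
        s1.append(l1.val)            # least-significant end
        l1 = l1.next
    while l2:
        s2.append(l2.val)
        l2 = l2.next
    carry, head = 0, None
    while s1 or s2 or carry:
        total = carry + (s1.pop() if s1 else 0) + (s2.pop() if s2 else 0)
        carry, digit = divmod(total, 10)
        node = ListNode(digit)       # prepend, so the result stays
        node.next = head             # most-significant first
        head = node
    return head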
|
normal
|
{
"blob_id": "0f3ecd0a7189f57fdbda2360f6e39bd6101e2fdb",
"index": 7435,
"step-1": "<mask token>\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\n<mask token>\nLinkedList([1]).printLinkedList(head)\n",
"step-3": "<mask token>\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\nl11, l22 = [7, 2, 4, 3], [5, 6, 4]\nl1 = LinkedList(l11).getHead()\nl2 = LinkedList(l22).getHead()\nsl = Solution()\nhead = sl.addTwoNumbers(l1, l2)\nLinkedList([1]).printLinkedList(head)\n",
"step-4": "from LinkedList import LinkedList\n\n\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n h1 = l1\n v1 = 0\n while h1:\n v1 = v1 * 10 + h1.val\n h1 = h1.next\n h2 = l2\n v2 = 0\n while h2:\n v2 = v2 * 10 + h2.val\n h2 = h2.next\n val = str(v1 + v2)\n dummy = curr = ListNode(0)\n for i in val:\n curr.next = ListNode(int(i))\n curr = curr.next\n return dummy.next\n\n\nl11, l22 = [7, 2, 4, 3], [5, 6, 4]\nl1 = LinkedList(l11).getHead()\nl2 = LinkedList(l22).getHead()\nsl = Solution()\nhead = sl.addTwoNumbers(l1, l2)\nLinkedList([1]).printLinkedList(head)\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_admin():
"""
The function ``is_admin`` detects whether the calling process is running
with administrator/superuser privileges. It works cross-platform on
either Windows NT systems or Unix-based systems.
"""
if os.name == 'nt':
try:
os.listdir(os.sep.join([os.environ.get('SystemRoot',
'C:\\windows'), 'temp']))
except:
return False
else:
return True
elif 'SUDO_USER' in os.environ and os.geteuid() == 0:
return True
else:
return False
<|reserved_special_token_1|>
import os
def is_admin():
"""
The function ``is_admin`` detects whether the calling process is running
with administrator/superuser privileges. It works cross-platform on
either Windows NT systems or Unix-based systems.
"""
if os.name == 'nt':
try:
os.listdir(os.sep.join([os.environ.get('SystemRoot',
'C:\\windows'), 'temp']))
except:
return False
else:
return True
elif 'SUDO_USER' in os.environ and os.geteuid() == 0:
return True
else:
return False
<|reserved_special_token_1|>
import os
def is_admin():
"""
The function ``is_admin`` detects whether the calling process is running
with administrator/superuser privileges. It works cross-platform on
either Windows NT systems or Unix-based systems.
"""
if os.name == 'nt':
try:
# Only Windows users with admin privileges can read
# the C:\windows\temp directory.
os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\windows'),'temp']))
except:
return False
else:
return True
else:
# Root has UID 0 on Unix systems.
if 'SUDO_USER' in os.environ and os.geteuid() == 0:
return True
else:
return False
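# A minimal usage sketch: scripts that need elevation can bail out early.
if __name__ == '__main__':
    if is_admin():
        print('Running with administrator/superuser privileges.')
    else:
        print('Please re-run as root (Unix) or Administrator (Windows).')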
|
flexible
|
{
"blob_id": "f1601d3d820b93631f9b1358627a5716016ad135",
"index": 5473,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_admin():\n \"\"\"\n The function ``is_admin`` detects whether the calling process is running\n with administrator/superuser privileges. It works cross-platform on \n either Windows NT systems or Unix-based systems.\n \"\"\"\n if os.name == 'nt':\n try:\n os.listdir(os.sep.join([os.environ.get('SystemRoot',\n 'C:\\\\windows'), 'temp']))\n except:\n return False\n else:\n return True\n elif 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False\n",
"step-3": "import os\n\n\ndef is_admin():\n \"\"\"\n The function ``is_admin`` detects whether the calling process is running\n with administrator/superuser privileges. It works cross-platform on \n either Windows NT systems or Unix-based systems.\n \"\"\"\n if os.name == 'nt':\n try:\n os.listdir(os.sep.join([os.environ.get('SystemRoot',\n 'C:\\\\windows'), 'temp']))\n except:\n return False\n else:\n return True\n elif 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False\n",
"step-4": "import os\n\n\ndef is_admin():\n \"\"\"\n The function ``is_admin`` detects whether the calling process is running\n with administrator/superuser privileges. It works cross-platform on \n either Windows NT systems or Unix-based systems.\n \"\"\"\n if os.name == 'nt':\n try:\n # Only Windows users with admin privileges can read \n # the C:\\windows\\temp directory.\n os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\\\windows'),'temp']))\n except:\n return False\n else:\n return True\n else:\n # Root has UID 0 on Unix systems.\n if 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_version():
return '2.01'
<|reserved_special_token_0|>
def restart():
print('restart')
try:
emby_wsocket.stop()
except:
sys.exit()
sys.exit()
    print('restart finished')  # unreachable: sys.exit() above always exits first
def save_config(config_file, config):
with open(config_file, 'w') as fw:
json.dump(config, fw, indent=4)
try:
emby_wsocket.ws_config = config
emby_wsocket.EmbySession.config = config
except:
emby_wsocket.ws_config = config
<|reserved_special_token_0|>
def cargar_config(config_file, tv_path, av_path, lang_path):
with open(config_file, 'r') as f:
config = json.load(f)
config['Version'] = get_version()
default = config.get('Autoscript', False)
config['Autoscript'] = default
default = config.get('enable_all_libraries', False)
config['enable_all_libraries'] = default
default = config.get('TV_model', '')
config['TV_model'] = default
default = config.get('TV_SOURCES', [])
config['TV_SOURCES'] = default
default = config.get('AV_model', '')
config['AV_model'] = default
default = config.get('AV_SOURCES', [])
config['AV_SOURCES'] = default
default = config.get('TV_script_init', '')
config['TV_script_init'] = default
default = config.get('TV_script_end', '')
config['TV_script_end'] = default
default = config.get('av_delay_hdmi', 0)
config['av_delay_hdmi'] = default
default = config.get('AV_Port', 23)
config['AV_Port'] = default
default = config.get('timeout_oppo_mount', 60)
config['timeout_oppo_mount'] = default
default = config.get('language', 'es-ES')
config['language'] = default
default = config.get('default_nfs', False)
config['default_nfs'] = default
default = config.get('wait_nfs', False)
config['wait_nfs'] = default
default = config.get('refresh_time', 5)
config['refresh_time'] = default
default = config.get('check_beta', False)
config['check_beta'] = default
default = config.get('smbtrick', False)
config['smbtrick'] = default
default = config.get('BRDisc', False)
config['BRDisc'] = default
edit_server = 0
server_list = config['servers']
for server in server_list:
default = server.get('Test_OK', False)
server_list[edit_server]['Test_OK'] = default
edit_server = edit_server + 1
if config['TV'] == 'True':
config['TV'] = True
if config['TV'] == 'False':
config['TV'] = False
if config['AV'] == 'True':
config['AV'] = True
if config['AV'] == 'False':
config['AV'] = False
config['servers'] = server_list
config['tv_dirs'] = get_dir_folders(tv_path)
config['av_dirs'] = get_dir_folders(av_path)
config['langs'] = get_dir_folders(lang_path)
return config
def check_version(config):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
print(config['check_beta'])
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
xno_version = get_version()
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
print(xno_version)
print(last_version)
if xno_version < last_version:
resp['new_version'] = True
else:
resp['new_version'] = False
print(resp)
return resp
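# The version.js payload fetched above is assumed to look roughly like this
# (field names come from the code; the concrete values are hypothetical):
#   {"curr_version": "2.01", "curr_version_file": "xnoppo_2.01.zip",
#    "beta_version": "2.02b", "beta_version_file": "xnoppo_2.02b.zip"}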
def update_version(config, vers_path, cwd):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +
last_version_file)
headers = {}
response2 = requests.get(url2, headers=headers)
filename = vers_path + last_version_file
with open(filename, 'wb') as f:
f.write(response2.content)
f.close()
shutil.unpack_archive(filename, cwd)
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
    # move_files() and lib_path are defined elsewhere in the module
    if config['TV'] == True and config['TV_model'] != '':
move_files(tv_path + config['TV_model'], lib_path)
if config['AV'] == True and config['AV_model'] != '':
move_files(av_path + config['AV_model'], lib_path)
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
resp['new_version'] = False
return resp
def cargar_lang(config_file):
with open(config_file.encode(sys.getfilesystemencoding()), 'r',
encoding='latin-1') as f:
config = json.load(f)
return config
def leer_file(web_file):
with open(web_file, 'r', encoding='utf8') as f:
num = f.read()
return num
def leer_img(web_file):
with open(web_file, 'rb') as f:
num = f.read()
return num
def test_path(config, server):
rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)
result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])
return result2
def get_mount_path(movie, server_data):
movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])
movie = movie.replace('\\\\', '\\')
movie = movie.replace('\\', '/')
word = '/'
inicio = movie.find(word)
inicio = inicio + 1
final = movie.find(word, inicio, len(movie))
servidor = movie[inicio:final]
ultimo = final + 1
result = final + 1
while result > 0:
ultimo = result + 1
result = movie.find(word, ultimo, len(movie))
fichero = movie[ultimo:len(movie)]
final = final + 1
ultimo = ultimo - 1
carpeta = movie[final:ultimo]
resultado = {}
resultado['Servidor'] = servidor
resultado['Carpeta'] = carpeta
resultado['Fichero'] = fichero
return resultado
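# Worked example (hypothetical paths): with
#   server_data = {'Emby_Path': '/mnt/media', 'Oppo_Path': '/NAS/Media'}
# get_mount_path('/mnt/media/Movies/film.mkv', server_data) returns
#   {'Servidor': 'NAS', 'Carpeta': 'Media/Movies', 'Fichero': 'film.mkv'}
# i.e. the first path component is the share host, the last is the file, and
# everything in between is the folder that gets mounted on the Oppo.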
def test_mount_path(config, servidor, carpeta):
sendnotifyremote(config['Oppo_IP'])
result = check_socket(config)
if result == 0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('EJT', config)
time.sleep(1)
response_data6b = getsetupmenu(config)
while response_data6f.find('devicelist":[]') > 0:
time.sleep(1)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('QPW', config)
device_list = json.loads(response_data6f)
if config['DebugLevel'] > 0:
print(device_list)
nfs = config['default_nfs']
for device in device_list['devicelist']:
if device['name'].upper() == servidor.upper():
if device['sub_type'] == 'nfs':
nfs = True
break
else:
nfs = False
break
if nfs:
response_login = LoginNFS(config, servidor)
else:
response_login = LoginSambaWithOutID(config, servidor)
if config['Always_ON'] == False:
time.sleep(5)
response_data6b = getsetupmenu(config)
if nfs:
response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',
config)
else:
response_mount = mountSharedFolder(servidor, carpeta, '', '',
config)
response = json.loads(response_mount)
if config['Autoscript'] == True:
result = umountSharedFolder(config)
if response['success'] == True:
a = 'OK'
else:
a = 'FAILURE'
return a
else:
print(
            'Unable to connect: check the settings, and make sure the OPPO is powered on or in standby'
)
def test_emby(config):
try:
EmbySession = EmbyHttp(config)
user_info = EmbySession.user_info
if user_info['SessionInfo']['Id'] != '':
return 'OK'
else:
return 'FAILED'
except:
return 'FAILED'
<|reserved_special_token_0|>
def carga_libraries(config):
try:
EmbySession = EmbyHttp(config)
views_list = EmbySession.get_user_views(EmbySession.user_info[
'User']['Id'])
libraries = []
for view in views_list:
library = {}
library['Name'] = view['Name']
library['Id'] = view['Id']
library['Active'] = False
try:
lib_list = config['Libraries']
except:
lib_list = {}
for lib in lib_list:
if lib['Id'] == view['Id']:
library['Active'] = lib['Active']
libraries.append(library)
config['Libraries'] = libraries
return 0
except:
return 1
def is_library_active(config, libraryname):
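    """Look up a library by name in config['Libraries'] and return its
    Active flag (False if unknown)."""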
for library in config['Libraries']:
if library['Name'] == libraryname:
return library['Active']
return False
def get_selectableFolders(config):
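    """Rebuild config['servers'] from Emby's selectable media folders,
    keeping the name, Oppo path and test status of entries that already
    exist in the configuration."""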
EmbySession = EmbyHttp(config)
MediaFolders = EmbySession.get_emby_selectablefolders()
servers = []
for Folder in MediaFolders:
index = 1
active = is_library_active(config, Folder['Name'])
if config['enable_all_libraries'] == True:
active = True
if active == True:
for SubFolder in Folder['SubFolders']:
server = {}
server['Id'] = SubFolder['Id']
if index > 1:
server['name'] = Folder['Name'] + '(' + str(index) + ')'
else:
server['name'] = Folder['Name']
server['Emby_Path'] = SubFolder['Path']
server['Oppo_Path'] = '/'
try:
serv_list = config['servers']
except:
serv_list = {}
for serv in serv_list:
if server['Emby_Path'] == serv['Emby_Path']:
server['name'] = serv['name']
server['Oppo_Path'] = serv['Oppo_Path']
server['Test_OK'] = serv['Test_OK']
servers.append(server)
index = index + 1
config['servers'] = servers
def get_dir_folders(directory):
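    """Return the sub-directories of directory (used to list the available
    TV/AV profiles and languages)."""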
    list_dir = []
    for x in os.listdir(directory):
        if os.path.isdir(os.path.join(directory, x)):
            list_dir.append(x)
    return list_dir
def get_devices(config):
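    """Fill config['devices'] with the Emby devices other than Xnoppo
    itself, labelled 'Name / AppName' and keyed by ReportedDeviceId.
    Returns 'OK' or 'FAILURE'."""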
try:
EmbySession = EmbyHttp(config)
devices = EmbySession.get_emby_devices()
index = 0
dev_temp = []
for device in devices['Items']:
try:
if device['Id'] != 'Xnoppo':
device['Name'] = device['Name'] + ' / ' + device['AppName']
device['Id'] = device['ReportedDeviceId']
dev_temp.append(device)
except:
pass
config['devices'] = dev_temp
return 'OK'
except:
return 'FAILURE'
class MyServer(BaseHTTPRequestHandler):
    """HTTP front end for the Xnoppo web UI: serves the static pages under
    web/ and a small JSON API used by the configuration screens."""

    def _reply(self, body, content_type='text/html', code=200):
        """Send a complete response. Dicts and lists are serialized to
        JSON, and Content-Length always matches the bytes actually
        written. CORS headers are included because the UI also calls the
        API from other origins."""
        if isinstance(body, (dict, list)):
            body = json.dumps(body)
            content_type = 'application/json'
        if isinstance(body, str):
            body = body.encode('utf-8')
        self.send_response(code)
        self.send_header('Content-Type', content_type)
        self.send_header('Content-Length', str(len(body)))
        self.send_header('Access-Control-Allow-Credentials', 'true')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(body)

    def do_GET(self):
        cwd = os.path.dirname(os.path.abspath(__file__))
        if sys.platform.startswith('win'):
            separador = '\\'
        else:
            separador = '/'
        resource_path = (cwd + separador + 'web' + separador + 'resources' +
            separador)
        html_path = cwd + separador + 'web' + separador
        tv_path = (cwd + separador + 'web' + separador + 'libraries' +
            separador + 'TV' + separador)
        av_path = (cwd + separador + 'web' + separador + 'libraries' +
            separador + 'AV' + separador)
        lang_path = cwd + separador + 'web' + separador + 'lang' + separador
        vers_path = cwd + separador + 'versions' + separador
        print(self.path)
        html_pages = ('/emby_conf.html', '/oppo_conf.html', '/lib_conf.html',
            '/path_conf.html', '/tv_conf.html', '/av_conf.html',
            '/other_conf.html', '/status.html', '/help.html', '/remote.html')
        images = {'/android-chrome-36x36.png': 'image/png',
            '/av-receiver-icon-2.jpg': 'image/jpeg',
            '/dragon.png': 'image/png'}
        if self.path in html_pages:
            self._reply(leer_file(html_path + self.path[1:]))
            return 0
        if self.path in images:
            self._reply(leer_img(resource_path + self.path[1:]),
                images[self.path])
            return 0
        if self.path == '/xnoppo_config':
            a = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            self._reply(a)
            return 0
        if self.path == '/xnoppo_config_lib':
            a = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            carga_libraries(a)
            self._reply(a)
            return 0
        if self.path == '/xnoppo_config_dev':
            a = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            get_devices(a)
            self._reply(a)
            return 0
        if self.path == '/check_version':
            config = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            self._reply(check_version(config))
            return 0
        if self.path == '/update_version':
            config = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            a = update_version(config, vers_path, cwd)
            self._reply(a)
            restart()
            return 0
        if self.path == '/get_state':
            self._reply(get_state())
            return 0
        if self.path == '/restart':
            self._reply('Restarting')
            restart()
            return 0
        if self.path == '/refresh_paths':
            a = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            get_selectableFolders(a)
            self._reply(a)
            return 0
        if self.path == '/lang':
            config = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            a = cargar_lang(lang_path + config['language'] + separador +
                'lang.js')
            self._reply(a)
            return 0
        if self.path.find('/send_key?') >= 0:
            b = self.path[len('/send_key?sendkey='):]
            print(b)
            config = cargar_config(cwd + separador + 'config.json', tv_path,
                av_path, lang_path)
            sendnotifyremote(config['Oppo_IP'])
            result = check_socket(config)
            if b == 'PON':
                if result == 0:
                    response_data6a = getmainfirmwareversion(config)
                    response_data6c = getdevicelist(config)
                    response_data6b = getsetupmenu(config)
                    response_data6c = OppoSignin(config)
                    response_data6d = getdevicelist(config)
                    response_data6e = getglobalinfo(config)
                    response_data6f = getdevicelist(config)
                    response_data_on = sendremotekey('EJT', config)
                    if config['BRDisc'] == True:
                        time.sleep(1)
                        response_data_on = sendremotekey('EJT', config)
                    time.sleep(1)
                    response_data6b = getsetupmenu(config)
            else:
                response_data_on = sendremotekey(b, config)
            self._reply('ok', 'text/plain')
            return 0
        if self.path == '/log.txt':
            a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')
            self._reply(a, 'text/plain')
            return 0
        self._reply('<html><head><title>Xnoppo</title></head><body>'
            '<p>Request: %s</p><p>This is an example web server.</p>'
            '</body></html>' % self.path)
    def do_POST(self):
        cwd = os.path.dirname(os.path.abspath(__file__))
        if sys.platform.startswith('win'):
            separador = '\\'
        else:
            separador = '/'
        tv_path = (cwd + separador + 'web' + separador + 'libraries' +
            separador + 'TV' + separador)
        av_path = (cwd + separador + 'web' + separador + 'libraries' +
            separador + 'AV' + separador)
        lib_path = cwd + separador + 'lib' + separador
        lang_path = cwd + separador + 'web' + separador + 'lang' + separador
        config_file = cwd + separador + 'config.json'
        print(self.path)
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        payload = json.loads(post_data.decode('utf-8'))
        if self.path == '/save_config':
            save_config(config_file, payload)
            self._reply(payload)
            return 0
        if self.path == '/check_emby':
            config = payload
            if test_emby(config) == 'OK':
                self._reply(config)
                status = get_state()
                if status['Playstate'] == 'Not_Connected':
                    save_config(config_file, config)
                    emby_wsocket.ws_config = config
                    restart()
            else:
                self._reply('ERROR', code=300)
            return 0
        if self.path == '/test_path':
            server = payload
            config = cargar_config(config_file, tv_path, av_path, lang_path)
            if test_path(config, server) == 'OK':
                self._reply(server)
            else:
                self._reply('ERROR', code=300)
            return 0
        if self.path == '/navigate_path':
            config = cargar_config(config_file, tv_path, av_path, lang_path)
            a = navigate_folder(payload['path'], config)
            self._reply(a)
            return 0
        if self.path in ('/move_tv', '/move_av'):
            config = payload
            save_config(config_file, config)
            if self.path == '/move_tv':
                move_files(tv_path + config['TV_model'], lib_path)
            else:
                move_files(av_path + config['AV_model'], lib_path)
            self._reply(config)
            restart()
            return 0
        if self.path in ('/get_tv_key', '/get_tv_sources'):
            config = payload
            action = get_tv_key if self.path == '/get_tv_key' else get_tv_sources
            if action(config) == 'OK':
                save_config(config_file, config)
                self._reply(config)
            else:
                self._reply('ERROR', code=300)
            return 0
        if self.path == '/get_av_sources':
            config = payload
            a = get_hdmi_list(config)
            if a is not None:
                config['AV_SOURCES'] = a
                save_config(config_file, config)
                self._reply(config)
            else:
                self._reply('ERROR', code=300)
            return 0
        # The remaining endpoints share one shape: run a test against the
        # configured device and echo the config back on success.
        simple_tests = {'/check_oppo': test_oppo,
            '/tv_test_conn': tv_test_conn, '/tv_test_init': tv_change_hdmi,
            '/tv_test_end': tv_set_prev, '/av_test_on': av_check_power,
            '/av_test_off': av_power_off, '/av_test_hdmi': av_change_hdmi}
        if self.path in simple_tests:
            config = payload
            if simple_tests[self.path](config) == 'OK':
                self._reply(config)
            else:
                self._reply('ERROR', code=300)
            return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_version():
return '2.01'
def thread_function(ws_object):
print('Thread: starting')
ws_object.start()
print('Thread: finishing')
def restart():
print('restart')
try:
emby_wsocket.stop()
except:
sys.exit()
sys.exit()
print('fin restart')
def save_config(config_file, config):
with open(config_file, 'w') as fw:
json.dump(config, fw, indent=4)
fw.close
try:
emby_wsocket.ws_config = config
emby_wsocket.EmbySession.config = config
except:
emby_wsocket.ws_config = config
def get_state():
status = {}
status['Version'] = get_version()
try:
status['Playstate'] = emby_wsocket.EmbySession.playstate
status['playedtitle'] = emby_wsocket.EmbySession.playedtitle
status['server'] = emby_wsocket.EmbySession.server
status['folder'] = emby_wsocket.EmbySession.folder
status['filename'] = emby_wsocket.EmbySession.filename
status['CurrentData'] = emby_wsocket.EmbySession.currentdata
except:
status['Playstate'] = 'Not_Connected'
status['playedtitle'] = ''
status['server'] = ''
status['folder'] = ''
status['filename'] = ''
status['CurrentData'] = ''
status['cpu_perc'] = psutil.cpu_percent()
status['mem_perc'] = psutil.virtual_memory().percent
print(psutil.virtual_memory().percent)
print(status)
return status
def cargar_config(config_file, tv_path, av_path, lang_path):
with open(config_file, 'r') as f:
config = json.load(f)
f.close
config['Version'] = get_version()
default = config.get('Autoscript', False)
config['Autoscript'] = default
default = config.get('enable_all_libraries', False)
config['enable_all_libraries'] = default
default = config.get('TV_model', '')
config['TV_model'] = default
default = config.get('TV_SOURCES', [])
config['TV_SOURCES'] = default
default = config.get('AV_model', '')
config['AV_model'] = default
default = config.get('AV_SOURCES', [])
config['AV_SOURCES'] = default
default = config.get('TV_script_init', '')
config['TV_script_init'] = default
default = config.get('TV_script_end', '')
config['TV_script_end'] = default
default = config.get('av_delay_hdmi', 0)
config['av_delay_hdmi'] = default
default = config.get('AV_Port', 23)
config['AV_Port'] = default
default = config.get('timeout_oppo_mount', 60)
config['timeout_oppo_mount'] = default
default = config.get('language', 'es-ES')
config['language'] = default
default = config.get('default_nfs', False)
config['default_nfs'] = default
default = config.get('wait_nfs', False)
config['wait_nfs'] = default
default = config.get('refresh_time', 5)
config['refresh_time'] = default
default = config.get('check_beta', False)
config['check_beta'] = default
default = config.get('smbtrick', False)
config['smbtrick'] = default
default = config.get('BRDisc', False)
config['BRDisc'] = default
edit_server = 0
server_list = config['servers']
for server in server_list:
default = server.get('Test_OK', False)
server_list[edit_server]['Test_OK'] = default
edit_server = edit_server + 1
if config['TV'] == 'True':
config['TV'] = True
if config['TV'] == 'False':
config['TV'] = False
if config['AV'] == 'True':
config['AV'] = True
if config['AV'] == 'False':
config['AV'] = False
config['servers'] = server_list
config['tv_dirs'] = get_dir_folders(tv_path)
config['av_dirs'] = get_dir_folders(av_path)
config['langs'] = get_dir_folders(lang_path)
return config
def check_version(config):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
print(config['check_beta'])
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
xno_version = get_version()
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
print(xno_version)
print(last_version)
if xno_version < last_version:
resp['new_version'] = True
else:
resp['new_version'] = False
print(resp)
return resp
def update_version(config, vers_path, cwd):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +
last_version_file)
headers = {}
response2 = requests.get(url2, headers=headers)
filename = vers_path + last_version_file
with open(filename, 'wb') as f:
f.write(response2.content)
f.close()
shutil.unpack_archive(filename, cwd)
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
if config['TV'] == True and config['TV_model'] != '':
move_files(tv_path + config['TV_model'], lib_path)
if config['AV'] == True and config['AV_model'] != '':
move_files(av_path + config['AV_model'], lib_path)
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
resp['new_version'] = False
return resp
def cargar_lang(config_file):
with open(config_file.encode(sys.getfilesystemencoding()), 'r',
encoding='latin-1') as f:
config = json.load(f)
f.close
return config
def leer_file(web_file):
with open(web_file, 'r', encoding='utf8') as f:
num = f.read()
f.close
return num
def leer_img(web_file):
with open(web_file, 'rb') as f:
num = f.read()
f.close
return num
def test_path(config, server):
rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)
result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])
return result2
def get_mount_path(movie, server_data):
movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])
movie = movie.replace('\\\\', '\\')
movie = movie.replace('\\', '/')
word = '/'
inicio = movie.find(word)
inicio = inicio + 1
final = movie.find(word, inicio, len(movie))
servidor = movie[inicio:final]
ultimo = final + 1
result = final + 1
while result > 0:
ultimo = result + 1
result = movie.find(word, ultimo, len(movie))
fichero = movie[ultimo:len(movie)]
final = final + 1
ultimo = ultimo - 1
carpeta = movie[final:ultimo]
resultado = {}
resultado['Servidor'] = servidor
resultado['Carpeta'] = carpeta
resultado['Fichero'] = fichero
return resultado
def test_mount_path(config, servidor, carpeta):
sendnotifyremote(config['Oppo_IP'])
result = check_socket(config)
if result == 0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('EJT', config)
time.sleep(1)
response_data6b = getsetupmenu(config)
while response_data6f.find('devicelist":[]') > 0:
time.sleep(1)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('QPW', config)
device_list = json.loads(response_data6f)
if config['DebugLevel'] > 0:
print(device_list)
nfs = config['default_nfs']
for device in device_list['devicelist']:
if device['name'].upper() == servidor.upper():
if device['sub_type'] == 'nfs':
nfs = True
break
else:
nfs = False
break
if nfs:
response_login = LoginNFS(config, servidor)
else:
response_login = LoginSambaWithOutID(config, servidor)
if config['Always_ON'] == False:
time.sleep(5)
response_data6b = getsetupmenu(config)
if nfs:
response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',
config)
else:
response_mount = mountSharedFolder(servidor, carpeta, '', '',
config)
response = json.loads(response_mount)
if config['Autoscript'] == True:
result = umountSharedFolder(config)
if response['success'] == True:
a = 'OK'
else:
a = 'FAILURE'
return a
else:
print(
'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'
)
def test_emby(config):
try:
EmbySession = EmbyHttp(config)
user_info = EmbySession.user_info
if user_info['SessionInfo']['Id'] != '':
return 'OK'
else:
return 'FAILED'
except:
return 'FAILED'
def test_oppo(config):
result = check_socket(config)
if result == 0:
return 'OK'
else:
return 'FAILED'
def carga_libraries(config):
try:
EmbySession = EmbyHttp(config)
views_list = EmbySession.get_user_views(EmbySession.user_info[
'User']['Id'])
libraries = []
for view in views_list:
library = {}
library['Name'] = view['Name']
library['Id'] = view['Id']
library['Active'] = False
try:
lib_list = config['Libraries']
except:
lib_list = {}
for lib in lib_list:
if lib['Id'] == view['Id']:
library['Active'] = lib['Active']
libraries.append(library)
config['Libraries'] = libraries
return 0
except:
return 1
def is_library_active(config, libraryname):
for library in config['Libraries']:
if library['Name'] == libraryname:
return library['Active']
return False
def get_selectableFolders(config):
EmbySession = EmbyHttp(config)
MediaFolders = EmbySession.get_emby_selectablefolders()
servers = []
for Folder in MediaFolders:
index = 1
active = is_library_active(config, Folder['Name'])
if config['enable_all_libraries'] == True:
active = True
if active == True:
for SubFolder in Folder['SubFolders']:
server = {}
server['Id'] = SubFolder['Id']
if index > 1:
server['name'] = Folder['Name'] + '(' + str(index) + ')'
else:
server['name'] = Folder['Name']
server['Emby_Path'] = SubFolder['Path']
server['Oppo_Path'] = '/'
try:
serv_list = config['servers']
except:
serv_list = {}
for serv in serv_list:
if server['Emby_Path'] == serv['Emby_Path']:
server['name'] = serv['name']
server['Oppo_Path'] = serv['Oppo_Path']
server['Test_OK'] = serv['Test_OK']
servers.append(server)
index = index + 1
config['servers'] = servers
def get_dir_folders(directory):
os.chdir(directory)
dirs = os.listdir('.')
encontrado = False
list_dir = []
for x in dirs:
if os.path.isdir(x):
list_dir.append(x)
return list_dir
<|reserved_special_token_0|>
def get_devices(config):
try:
EmbySession = EmbyHttp(config)
devices = EmbySession.get_emby_devices()
index = 0
dev_temp = []
for device in devices['Items']:
try:
if device['Id'] != 'Xnoppo':
device['Name'] = device['Name'] + ' / ' + device['AppName']
device['Id'] = device['ReportedDeviceId']
dev_temp.append(device)
except:
pass
config['devices'] = dev_temp
return 'OK'
except:
return 'FAILURE'
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
resource_path = (cwd + separador + 'web' + separador + 'resources' +
separador)
html_path = cwd + separador + 'web' + separador
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/emby_conf.html':
i = leer_file(html_path + 'emby_conf.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/oppo_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'oppo_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/lib_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'lib_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/path_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'path_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/tv_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'tv_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/av_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'av_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/other_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'other_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/status.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'status.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/help.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'help.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/remote.html':
i = leer_file(html_path + 'remote.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/android-chrome-36x36.png':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_img(resource_path + 'android-chrome-36x36.png')
self.wfile.write(bytes(i))
return 0
if self.path == '/av-receiver-icon-2.jpg':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_img(resource_path + 'av-receiver-icon-2.jpg')
self.wfile.write(bytes(i))
return 0
if self.path == '/dragon.png':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_img(resource_path + 'dragon.png')
self.wfile.write(bytes(i))
return 0
if self.path == '/xnoppo_config':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/xnoppo_config_lib':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
carga_libraries(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/xnoppo_config_dev':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
get_devices(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/check_version':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = check_version(config)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/update_version':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = update_version(config, vers_path, cwd)
restart()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/get_state':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = get_state()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/restart':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
a = 'Restarting'
self.wfile.write(bytes(a, 'utf-8'))
restart()
if self.path == '/refresh_paths':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
get_selectableFolders(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/lang':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = cargar_lang(lang_path + config['language'] + separador +
'lang.js')
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path.find('/send_key?') >= 0:
get_data = self.path
print(get_data)
a = len('/send_key?sendkey=')
b = get_data[a:len(get_data)]
print(b)
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
sendnotifyremote(config['Oppo_IP'])
result = check_socket(config)
if b == 'PON':
if result == 0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('EJT', config)
if config['BRDisc'] == True:
time.sleep(1)
response_data_on = sendremotekey('EJT', config)
time.sleep(1)
response_data6b = getsetupmenu(config)
else:
response_data_on = sendremotekey(b, config)
self.send_response(200)
self.send_header('Content-type', 'text')
self.end_headers()
a = 'ok'
self.wfile.write(bytes(a, 'utf-8'))
return 0
if self.path == '/log.txt':
self.send_response(200)
self.send_header('Content-type', 'text')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')
self.wfile.write(bytes(a))
return 0
else:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(
'<html><head><title>https://pythonbasics.org</title></head>',
'utf-8'))
self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))
self.wfile.write(bytes('<body>', 'utf-8'))
self.wfile.write(bytes('<p>This is an example web server.</p>',
'utf-8'))
self.wfile.write(bytes('</body></html>', 'utf-8'))
def do_POST(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
resource_path = (cwd + separador + 'web' + separador + 'resources' +
separador)
html_path = cwd + separador + 'web' + separador
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
lib_path = cwd + separador + 'lib' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/save_config':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
if self.path == '/check_emby':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = test_emby(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
status = get_state()
if status['Playstate'] == 'Not_Connected':
save_config(cwd + separador + 'config.json', config)
emby_wsocket.ws_config = config
restart()
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/check_oppo':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = test_oppo(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/test_path':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
server = json.loads(post_data.decode('utf-8'))
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = test_path(config, server)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(server))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(server), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/navigate_path':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
path_obj = json.loads(post_data.decode('utf-8'))
path = path_obj['path']
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = navigate_folder(path, config)
a_json = json.dumps(a)
print(len(a_json))
self.send_response(200)
self.send_header('Content-Length', len(a_json))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/move_tv':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json', config)
move_files(tv_path + config['TV_model'], lib_path)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
restart()
return 0
if self.path == '/move_av':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json', config)
move_files(av_path + config['AV_model'], lib_path)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
restart()
return 0
if self.path == '/get_tv_key':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = get_tv_key(config)
if a == 'OK':
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/tv_test_conn':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = tv_test_conn(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/get_tv_sources':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = get_tv_sources(config)
if a == 'OK':
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/get_av_sources':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = get_hdmi_list(config)
if a != None:
config['AV_SOURCES'] = a
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/tv_test_init':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = tv_change_hdmi(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/tv_test_end':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = tv_set_prev(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/av_test_on':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = av_check_power(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/av_test_off':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = av_power_off(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/av_test_hdmi':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = av_change_hdmi(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_version():
return '2.01'
def thread_function(ws_object):
print('Thread: starting')
ws_object.start()
print('Thread: finishing')
def restart():
print('restart')
try:
emby_wsocket.stop()
except:
sys.exit()
sys.exit()
print('fin restart')
def save_config(config_file, config):
with open(config_file, 'w') as fw:
json.dump(config, fw, indent=4)
fw.close
try:
emby_wsocket.ws_config = config
emby_wsocket.EmbySession.config = config
except:
emby_wsocket.ws_config = config
def get_state():
status = {}
status['Version'] = get_version()
try:
status['Playstate'] = emby_wsocket.EmbySession.playstate
status['playedtitle'] = emby_wsocket.EmbySession.playedtitle
status['server'] = emby_wsocket.EmbySession.server
status['folder'] = emby_wsocket.EmbySession.folder
status['filename'] = emby_wsocket.EmbySession.filename
status['CurrentData'] = emby_wsocket.EmbySession.currentdata
except:
status['Playstate'] = 'Not_Connected'
status['playedtitle'] = ''
status['server'] = ''
status['folder'] = ''
status['filename'] = ''
status['CurrentData'] = ''
status['cpu_perc'] = psutil.cpu_percent()
status['mem_perc'] = psutil.virtual_memory().percent
print(psutil.virtual_memory().percent)
print(status)
return status
def cargar_config(config_file, tv_path, av_path, lang_path):
with open(config_file, 'r') as f:
config = json.load(f)
f.close
config['Version'] = get_version()
    config['Autoscript'] = config.get('Autoscript', False)
    config['enable_all_libraries'] = config.get('enable_all_libraries', False)
    config['TV_model'] = config.get('TV_model', '')
    config['TV_SOURCES'] = config.get('TV_SOURCES', [])
    config['AV_model'] = config.get('AV_model', '')
    config['AV_SOURCES'] = config.get('AV_SOURCES', [])
    config['TV_script_init'] = config.get('TV_script_init', '')
    config['TV_script_end'] = config.get('TV_script_end', '')
    config['av_delay_hdmi'] = config.get('av_delay_hdmi', 0)
    config['AV_Port'] = config.get('AV_Port', 23)
    config['timeout_oppo_mount'] = config.get('timeout_oppo_mount', 60)
    config['language'] = config.get('language', 'es-ES')
    config['default_nfs'] = config.get('default_nfs', False)
    config['wait_nfs'] = config.get('wait_nfs', False)
    config['refresh_time'] = config.get('refresh_time', 5)
    config['check_beta'] = config.get('check_beta', False)
    config['smbtrick'] = config.get('smbtrick', False)
    config['BRDisc'] = config.get('BRDisc', False)
    for server in config['servers']:
        server['Test_OK'] = server.get('Test_OK', False)
    # normalise boolean flags stored as strings in the config file
    if config['TV'] == 'True':
        config['TV'] = True
    if config['TV'] == 'False':
        config['TV'] = False
    if config['AV'] == 'True':
        config['AV'] = True
    if config['AV'] == 'False':
        config['AV'] = False
config['tv_dirs'] = get_dir_folders(tv_path)
config['av_dirs'] = get_dir_folders(av_path)
config['langs'] = get_dir_folders(lang_path)
return config
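# check_version fetches the published version manifest from GitHub and
# reports whether a newer release (beta or stable, depending on the
# check_beta flag) is available.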
def check_version(config):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
print(config['check_beta'])
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
xno_version = get_version()
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
print(xno_version)
print(last_version)
if xno_version < last_version:
resp['new_version'] = True
else:
resp['new_version'] = False
print(resp)
return resp
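# update_version downloads the release archive, unpacks it over the current
# installation and re-copies the active TV/AV model libraries into lib/.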
def update_version(config, vers_path, cwd):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +
last_version_file)
headers = {}
response2 = requests.get(url2, headers=headers)
filename = vers_path + last_version_file
with open(filename, 'wb') as f:
f.write(response2.content)
shutil.unpack_archive(filename, cwd)
    if sys.platform.startswith('win'):
        separador = '\\'
    else:
        separador = '/'
    tv_path = (cwd + separador + 'web' + separador + 'libraries' +
        separador + 'TV' + separador)
    av_path = (cwd + separador + 'web' + separador + 'libraries' +
        separador + 'AV' + separador)
    lib_path = cwd + separador + 'lib' + separador
    if config['TV'] == True and config['TV_model'] != '':
        move_files(tv_path + config['TV_model'], lib_path)
    if config['AV'] == True and config['AV_model'] != '':
        move_files(av_path + config['AV_model'], lib_path)
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
resp['new_version'] = False
return resp
def cargar_lang(config_file):
with open(config_file.encode(sys.getfilesystemencoding()), 'r',
encoding='latin-1') as f:
config = json.load(f)
return config
def leer_file(web_file):
with open(web_file, 'r', encoding='utf8') as f:
num = f.read()
return num
def leer_img(web_file):
with open(web_file, 'rb') as f:
num = f.read()
return num
def test_path(config, server):
rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)
result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])
return result2
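# get_mount_path translates an Emby media path into the pieces the Oppo
# needs, by substituting Oppo_Path for Emby_Path and splitting the result on
# '/': the first component is the share server, the last is the file name
# and the middle is the folder. For example, with Emby_Path '\\NAS\media'
# mapped to Oppo_Path '/nas/media', the movie '\\NAS\media\films\film.mkv'
# yields Servidor 'nas', Carpeta 'media/films' and Fichero 'film.mkv'.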
def get_mount_path(movie, server_data):
movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])
movie = movie.replace('\\\\', '\\')
movie = movie.replace('\\', '/')
word = '/'
inicio = movie.find(word)
inicio = inicio + 1
final = movie.find(word, inicio, len(movie))
servidor = movie[inicio:final]
ultimo = final + 1
result = final + 1
while result > 0:
ultimo = result + 1
result = movie.find(word, ultimo, len(movie))
fichero = movie[ultimo:len(movie)]
final = final + 1
ultimo = ultimo - 1
carpeta = movie[final:ultimo]
resultado = {}
resultado['Servidor'] = servidor
resultado['Carpeta'] = carpeta
resultado['Fichero'] = fichero
return resultado
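# test_mount_path wakes the Oppo, polls until its network device list is
# populated, logs in over NFS or Samba depending on what the player reports
# for the share, and then tries to mount the folder; it returns 'OK' or
# 'FAILURE'.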
def test_mount_path(config, servidor, carpeta):
sendnotifyremote(config['Oppo_IP'])
result = check_socket(config)
if result == 0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('EJT', config)
time.sleep(1)
response_data6b = getsetupmenu(config)
while response_data6f.find('devicelist":[]') > 0:
time.sleep(1)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('QPW', config)
device_list = json.loads(response_data6f)
if config['DebugLevel'] > 0:
print(device_list)
        nfs = config['default_nfs']
        for device in device_list['devicelist']:
            if device['name'].upper() == servidor.upper():
                nfs = device['sub_type'] == 'nfs'
                break
if nfs:
response_login = LoginNFS(config, servidor)
else:
response_login = LoginSambaWithOutID(config, servidor)
if config['Always_ON'] == False:
time.sleep(5)
response_data6b = getsetupmenu(config)
if nfs:
response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',
config)
else:
response_mount = mountSharedFolder(servidor, carpeta, '', '',
config)
response = json.loads(response_mount)
if config['Autoscript'] == True:
result = umountSharedFolder(config)
if response['success'] == True:
a = 'OK'
else:
a = 'FAILURE'
return a
    else:
        print(
            'Unable to connect: check the settings and make sure the OPPO is powered on or in standby'
            )
def test_emby(config):
try:
EmbySession = EmbyHttp(config)
user_info = EmbySession.user_info
if user_info['SessionInfo']['Id'] != '':
return 'OK'
else:
return 'FAILED'
except:
return 'FAILED'
def test_oppo(config):
result = check_socket(config)
if result == 0:
return 'OK'
else:
return 'FAILED'
def carga_libraries(config):
try:
EmbySession = EmbyHttp(config)
views_list = EmbySession.get_user_views(EmbySession.user_info[
'User']['Id'])
libraries = []
for view in views_list:
library = {}
library['Name'] = view['Name']
library['Id'] = view['Id']
library['Active'] = False
try:
lib_list = config['Libraries']
except:
lib_list = {}
for lib in lib_list:
if lib['Id'] == view['Id']:
library['Active'] = lib['Active']
libraries.append(library)
config['Libraries'] = libraries
return 0
except:
return 1
def is_library_active(config, libraryname):
for library in config['Libraries']:
if library['Name'] == libraryname:
return library['Active']
return False
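# get_selectableFolders rebuilds config['servers'] from the media folders
# Emby exposes, keeping the name, Oppo path and test status of any entry the
# user had already configured.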
def get_selectableFolders(config):
EmbySession = EmbyHttp(config)
MediaFolders = EmbySession.get_emby_selectablefolders()
servers = []
for Folder in MediaFolders:
index = 1
active = is_library_active(config, Folder['Name'])
if config['enable_all_libraries'] == True:
active = True
if active == True:
for SubFolder in Folder['SubFolders']:
server = {}
server['Id'] = SubFolder['Id']
if index > 1:
server['name'] = Folder['Name'] + '(' + str(index) + ')'
else:
server['name'] = Folder['Name']
server['Emby_Path'] = SubFolder['Path']
server['Oppo_Path'] = '/'
try:
serv_list = config['servers']
except:
serv_list = {}
for serv in serv_list:
if server['Emby_Path'] == serv['Emby_Path']:
server['name'] = serv['name']
server['Oppo_Path'] = serv['Oppo_Path']
server['Test_OK'] = serv['Test_OK']
servers.append(server)
index = index + 1
config['servers'] = servers
def get_dir_folders(directory):
    os.chdir(directory)
    dirs = os.listdir('.')
    list_dir = []
    for x in dirs:
        if os.path.isdir(x):
            list_dir.append(x)
    return list_dir
def move_files(src, dest):
os.chdir(src)
src_files = os.listdir('.')
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, dest)
return 0
def get_devices(config):
try:
EmbySession = EmbyHttp(config)
devices = EmbySession.get_emby_devices()
index = 0
dev_temp = []
for device in devices['Items']:
try:
if device['Id'] != 'Xnoppo':
device['Name'] = device['Name'] + ' / ' + device['AppName']
device['Id'] = device['ReportedDeviceId']
dev_temp.append(device)
except:
pass
config['devices'] = dev_temp
return 'OK'
except:
return 'FAILURE'
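# MyServer implements the embedded web interface: do_GET serves the static
# pages, images and JSON status endpoints, while do_POST accepts a JSON body
# on every endpoint and replies 200 with the (possibly updated) object on
# success or 300 with 'ERROR' on failure.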
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
resource_path = (cwd + separador + 'web' + separador + 'resources' +
separador)
html_path = cwd + separador + 'web' + separador
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/emby_conf.html':
i = leer_file(html_path + 'emby_conf.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/oppo_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'oppo_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/lib_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'lib_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/path_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'path_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/tv_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'tv_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/av_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'av_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/other_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'other_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/status.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'status.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/help.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'help.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/remote.html':
i = leer_file(html_path + 'remote.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(i, 'utf-8'))
return 0
        if self.path == '/android-chrome-36x36.png':
            self.send_response(200)
            self.send_header('Content-type', 'image/png')
            self.end_headers()
            i = leer_img(resource_path + 'android-chrome-36x36.png')
            self.wfile.write(i)
            return 0
        if self.path == '/av-receiver-icon-2.jpg':
            self.send_response(200)
            self.send_header('Content-type', 'image/jpeg')
            self.end_headers()
            i = leer_img(resource_path + 'av-receiver-icon-2.jpg')
            self.wfile.write(i)
            return 0
        if self.path == '/dragon.png':
            self.send_response(200)
            self.send_header('Content-type', 'image/png')
            self.end_headers()
            i = leer_img(resource_path + 'dragon.png')
            self.wfile.write(i)
            return 0
if self.path == '/xnoppo_config':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/xnoppo_config_lib':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
carga_libraries(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/xnoppo_config_dev':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
get_devices(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/check_version':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = check_version(config)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/update_version':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = update_version(config, vers_path, cwd)
restart()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/get_state':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = get_state()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
        if self.path == '/restart':
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(bytes('Restarting', 'utf-8'))
            restart()
            return 0
if self.path == '/refresh_paths':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
get_selectableFolders(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/lang':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = cargar_lang(lang_path + config['language'] + separador +
'lang.js')
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path.find('/send_key?') >= 0:
get_data = self.path
print(get_data)
a = len('/send_key?sendkey=')
b = get_data[a:len(get_data)]
print(b)
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
sendnotifyremote(config['Oppo_IP'])
result = check_socket(config)
if b == 'PON':
if result == 0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('EJT', config)
if config['BRDisc'] == True:
time.sleep(1)
response_data_on = sendremotekey('EJT', config)
time.sleep(1)
response_data6b = getsetupmenu(config)
else:
response_data_on = sendremotekey(b, config)
self.send_response(200)
            self.send_header('Content-type', 'text/plain')
self.end_headers()
a = 'ok'
self.wfile.write(bytes(a, 'utf-8'))
return 0
        if self.path == '/log.txt':
            self.send_response(200)
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')
            self.wfile.write(a)
            return 0
else:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(
'<html><head><title>https://pythonbasics.org</title></head>',
'utf-8'))
self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))
self.wfile.write(bytes('<body>', 'utf-8'))
self.wfile.write(bytes('<p>This is an example web server.</p>',
'utf-8'))
self.wfile.write(bytes('</body></html>', 'utf-8'))
    def do_POST(self):
        cwd = os.path.dirname(os.path.abspath(__file__))
        if sys.platform.startswith('win'):
            separador = '\\'
        else:
            separador = '/'
        tv_path = (cwd + separador + 'web' + separador + 'libraries' +
            separador + 'TV' + separador)
        av_path = (cwd + separador + 'web' + separador + 'libraries' +
            separador + 'AV' + separador)
        lib_path = cwd + separador + 'lib' + separador
        lang_path = cwd + separador + 'web' + separador + 'lang' + separador
        config_file = cwd + separador + 'config.json'
        print(self.path)
        # every POST endpoint carries a JSON body, so read and decode it once
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        payload = json.loads(post_data.decode('utf-8'))

        def send_json(obj):
            # Content-Length is the byte length of the encoded body, not
            # len() of the Python dict; the Content-Type mirrors the original
            body = json.dumps(obj).encode('utf-8')
            self.send_response(200)
            self.send_header('Content-Length', str(len(body)))
            self.send_header('Content-Type', 'text/html')
            self.send_header('Access-Control-Allow-Credentials', 'true')
            self.send_header('Access-Control-Allow-Origin', '*')
            self.end_headers()
            self.wfile.write(body)

        def send_error():
            body = bytes('ERROR', 'utf-8')
            self.send_response(300)
            self.send_header('Content-Length', str(len(body)))
            self.send_header('Content-Type', 'text/html')
            self.send_header('Access-Control-Allow-Credentials', 'true')
            self.send_header('Access-Control-Allow-Origin', '*')
            self.end_headers()
            self.wfile.write(body)

        if self.path == '/save_config':
            config = payload
            save_config(config_file, config)
            send_json(config)
            return 0
        if self.path == '/check_emby':
            config = payload
            if test_emby(config) == 'OK':
                send_json(config)
                status = get_state()
                if status['Playstate'] == 'Not_Connected':
                    # a working Emby config while disconnected: persist it
                    # and restart so the websocket picks it up
                    save_config(config_file, config)
                    emby_wsocket.ws_config = config
                    restart()
            else:
                send_error()
            return 0
        if self.path == '/check_oppo':
            config = payload
            if test_oppo(config) == 'OK':
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/test_path':
            server = payload
            config = cargar_config(config_file, tv_path, av_path, lang_path)
            if test_path(config, server) == 'OK':
                send_json(server)
            else:
                send_error()
            return 0
        if self.path == '/navigate_path':
            path = payload['path']
            config = cargar_config(config_file, tv_path, av_path, lang_path)
            a = navigate_folder(path, config)
            send_json(a)
            return 0
        if self.path == '/move_tv':
            config = payload
            save_config(config_file, config)
            move_files(tv_path + config['TV_model'], lib_path)
            send_json(config)
            restart()
            return 0
        if self.path == '/move_av':
            config = payload
            save_config(config_file, config)
            move_files(av_path + config['AV_model'], lib_path)
            send_json(config)
            restart()
            return 0
        if self.path == '/get_tv_key':
            config = payload
            if get_tv_key(config) == 'OK':
                save_config(config_file, config)
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/tv_test_conn':
            config = payload
            if tv_test_conn(config) == 'OK':
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/get_tv_sources':
            config = payload
            if get_tv_sources(config) == 'OK':
                save_config(config_file, config)
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/get_av_sources':
            config = payload
            hdmi_list = get_hdmi_list(config)
            if hdmi_list is not None:
                config['AV_SOURCES'] = hdmi_list
                save_config(config_file, config)
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/tv_test_init':
            config = payload
            if tv_change_hdmi(config) == 'OK':
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/tv_test_end':
            config = payload
            if tv_set_prev(config) == 'OK':
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/av_test_on':
            config = payload
            if av_check_power(config) == 'OK':
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/av_test_off':
            config = payload
            if av_power_off(config) == 'OK':
                send_json(config)
            else:
                send_error()
            return 0
        if self.path == '/av_test_hdmi':
            config = payload
            if av_change_hdmi(config) == 'OK':
                send_json(config)
            else:
                send_error()
            return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import http.server
import socketserver
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import json
import io
import urllib
import requests
from lib.Emby_ws import xnoppo_ws
from lib.Emby_http import *
from lib.Xnoppo import *
from lib.Xnoppo_TV import *
import lib.Xnoppo_AVR
import shutil
import asyncio
import threading
import logging
import logging.handlers
import psutil
def get_version():
return '2.01'
def thread_function(ws_object):
print('Thread: starting')
ws_object.start()
print('Thread: finishing')
def restart():
print('restart')
try:
emby_wsocket.stop()
except:
sys.exit()
sys.exit()
print('fin restart')
def save_config(config_file, config):
with open(config_file, 'w') as fw:
json.dump(config, fw, indent=4)
fw.close
try:
emby_wsocket.ws_config = config
emby_wsocket.EmbySession.config = config
except:
emby_wsocket.ws_config = config
def get_state():
status = {}
status['Version'] = get_version()
try:
status['Playstate'] = emby_wsocket.EmbySession.playstate
status['playedtitle'] = emby_wsocket.EmbySession.playedtitle
status['server'] = emby_wsocket.EmbySession.server
status['folder'] = emby_wsocket.EmbySession.folder
status['filename'] = emby_wsocket.EmbySession.filename
status['CurrentData'] = emby_wsocket.EmbySession.currentdata
except:
status['Playstate'] = 'Not_Connected'
status['playedtitle'] = ''
status['server'] = ''
status['folder'] = ''
status['filename'] = ''
status['CurrentData'] = ''
status['cpu_perc'] = psutil.cpu_percent()
status['mem_perc'] = psutil.virtual_memory().percent
print(psutil.virtual_memory().percent)
print(status)
return status
def cargar_config(config_file, tv_path, av_path, lang_path):
with open(config_file, 'r') as f:
config = json.load(f)
f.close
config['Version'] = get_version()
default = config.get('Autoscript', False)
config['Autoscript'] = default
default = config.get('enable_all_libraries', False)
config['enable_all_libraries'] = default
default = config.get('TV_model', '')
config['TV_model'] = default
default = config.get('TV_SOURCES', [])
config['TV_SOURCES'] = default
default = config.get('AV_model', '')
config['AV_model'] = default
default = config.get('AV_SOURCES', [])
config['AV_SOURCES'] = default
default = config.get('TV_script_init', '')
config['TV_script_init'] = default
default = config.get('TV_script_end', '')
config['TV_script_end'] = default
default = config.get('av_delay_hdmi', 0)
config['av_delay_hdmi'] = default
default = config.get('AV_Port', 23)
config['AV_Port'] = default
default = config.get('timeout_oppo_mount', 60)
config['timeout_oppo_mount'] = default
default = config.get('language', 'es-ES')
config['language'] = default
default = config.get('default_nfs', False)
config['default_nfs'] = default
default = config.get('wait_nfs', False)
config['wait_nfs'] = default
default = config.get('refresh_time', 5)
config['refresh_time'] = default
default = config.get('check_beta', False)
config['check_beta'] = default
default = config.get('smbtrick', False)
config['smbtrick'] = default
default = config.get('BRDisc', False)
config['BRDisc'] = default
edit_server = 0
server_list = config['servers']
for server in server_list:
default = server.get('Test_OK', False)
server_list[edit_server]['Test_OK'] = default
edit_server = edit_server + 1
if config['TV'] == 'True':
config['TV'] = True
if config['TV'] == 'False':
config['TV'] = False
if config['AV'] == 'True':
config['AV'] = True
if config['AV'] == 'False':
config['AV'] = False
config['servers'] = server_list
config['tv_dirs'] = get_dir_folders(tv_path)
config['av_dirs'] = get_dir_folders(av_path)
config['langs'] = get_dir_folders(lang_path)
return config
def check_version(config):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
print(config['check_beta'])
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
xno_version = get_version()
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
print(xno_version)
print(last_version)
if xno_version < last_version:
resp['new_version'] = True
else:
resp['new_version'] = False
print(resp)
return resp
def update_version(config, vers_path, cwd):
url = (
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'
)
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
if config['check_beta'] == True:
last_version = version['beta_version']
last_version_file = version['beta_version_file']
else:
last_version = version['curr_version']
last_version_file = version['curr_version_file']
url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +
last_version_file)
headers = {}
response2 = requests.get(url2, headers=headers)
filename = vers_path + last_version_file
with open(filename, 'wb') as f:
f.write(response2.content)
f.close()
shutil.unpack_archive(filename, cwd)
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
if config['TV'] == True and config['TV_model'] != '':
move_files(tv_path + config['TV_model'], lib_path)
if config['AV'] == True and config['AV_model'] != '':
move_files(av_path + config['AV_model'], lib_path)
resp = {}
resp['version'] = last_version
resp['file'] = last_version_file
resp['new_version'] = False
return resp
def cargar_lang(config_file):
with open(config_file.encode(sys.getfilesystemencoding()), 'r',
encoding='latin-1') as f:
config = json.load(f)
f.close
return config
def leer_file(web_file):
with open(web_file, 'r', encoding='utf8') as f:
num = f.read()
f.close
return num
def leer_img(web_file):
with open(web_file, 'rb') as f:
num = f.read()
f.close
return num
def test_path(config, server):
rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)
result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])
return result2
def get_mount_path(movie, server_data):
movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])
movie = movie.replace('\\\\', '\\')
movie = movie.replace('\\', '/')
word = '/'
inicio = movie.find(word)
inicio = inicio + 1
final = movie.find(word, inicio, len(movie))
servidor = movie[inicio:final]
ultimo = final + 1
result = final + 1
while result > 0:
ultimo = result + 1
result = movie.find(word, ultimo, len(movie))
fichero = movie[ultimo:len(movie)]
final = final + 1
ultimo = ultimo - 1
carpeta = movie[final:ultimo]
resultado = {}
resultado['Servidor'] = servidor
resultado['Carpeta'] = carpeta
resultado['Fichero'] = fichero
return resultado
def test_mount_path(config, servidor, carpeta):
sendnotifyremote(config['Oppo_IP'])
result = check_socket(config)
if result == 0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('EJT', config)
time.sleep(1)
response_data6b = getsetupmenu(config)
while response_data6f.find('devicelist":[]') > 0:
time.sleep(1)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('QPW', config)
device_list = json.loads(response_data6f)
if config['DebugLevel'] > 0:
print(device_list)
nfs = config['default_nfs']
for device in device_list['devicelist']:
if device['name'].upper() == servidor.upper():
if device['sub_type'] == 'nfs':
nfs = True
break
else:
nfs = False
break
if nfs:
response_login = LoginNFS(config, servidor)
else:
response_login = LoginSambaWithOutID(config, servidor)
if config['Always_ON'] == False:
time.sleep(5)
response_data6b = getsetupmenu(config)
if nfs:
response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',
config)
else:
response_mount = mountSharedFolder(servidor, carpeta, '', '',
config)
response = json.loads(response_mount)
if config['Autoscript'] == True:
result = umountSharedFolder(config)
if response['success'] == True:
a = 'OK'
else:
a = 'FAILURE'
return a
else:
print(
'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'
)
def test_emby(config):
try:
EmbySession = EmbyHttp(config)
user_info = EmbySession.user_info
if user_info['SessionInfo']['Id'] != '':
return 'OK'
else:
return 'FAILED'
except:
return 'FAILED'
def test_oppo(config):
result = check_socket(config)
if result == 0:
return 'OK'
else:
return 'FAILED'
def carga_libraries(config):
try:
EmbySession = EmbyHttp(config)
views_list = EmbySession.get_user_views(EmbySession.user_info[
'User']['Id'])
libraries = []
for view in views_list:
library = {}
library['Name'] = view['Name']
library['Id'] = view['Id']
library['Active'] = False
try:
lib_list = config['Libraries']
except:
lib_list = {}
for lib in lib_list:
if lib['Id'] == view['Id']:
library['Active'] = lib['Active']
libraries.append(library)
config['Libraries'] = libraries
return 0
except:
return 1
def is_library_active(config, libraryname):
for library in config['Libraries']:
if library['Name'] == libraryname:
return library['Active']
return False
def get_selectableFolders(config):
EmbySession = EmbyHttp(config)
MediaFolders = EmbySession.get_emby_selectablefolders()
servers = []
for Folder in MediaFolders:
index = 1
active = is_library_active(config, Folder['Name'])
if config['enable_all_libraries'] == True:
active = True
if active == True:
for SubFolder in Folder['SubFolders']:
server = {}
server['Id'] = SubFolder['Id']
if index > 1:
server['name'] = Folder['Name'] + '(' + str(index) + ')'
else:
server['name'] = Folder['Name']
server['Emby_Path'] = SubFolder['Path']
server['Oppo_Path'] = '/'
try:
serv_list = config['servers']
except:
serv_list = {}
for serv in serv_list:
if server['Emby_Path'] == serv['Emby_Path']:
server['name'] = serv['name']
server['Oppo_Path'] = serv['Oppo_Path']
server['Test_OK'] = serv['Test_OK']
servers.append(server)
index = index + 1
config['servers'] = servers
def get_dir_folders(directory):
os.chdir(directory)
dirs = os.listdir('.')
encontrado = False
list_dir = []
for x in dirs:
if os.path.isdir(x):
list_dir.append(x)
return list_dir
def move_files(src, dest):
os.chdir(src)
src_files = os.listdir('.')
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, dest)
return 0
def get_devices(config):
try:
EmbySession = EmbyHttp(config)
devices = EmbySession.get_emby_devices()
index = 0
dev_temp = []
for device in devices['Items']:
try:
if device['Id'] != 'Xnoppo':
device['Name'] = device['Name'] + ' / ' + device['AppName']
device['Id'] = device['ReportedDeviceId']
dev_temp.append(device)
except:
pass
config['devices'] = dev_temp
return 'OK'
except:
return 'FAILURE'
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
resource_path = (cwd + separador + 'web' + separador + 'resources' +
separador)
html_path = cwd + separador + 'web' + separador
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/emby_conf.html':
i = leer_file(html_path + 'emby_conf.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/oppo_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'oppo_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/lib_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'lib_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/path_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'path_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/tv_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'tv_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/av_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'av_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/other_conf.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'other_conf.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/status.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'status.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/help.html':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_file(html_path + 'help.html')
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/remote.html':
i = leer_file(html_path + 'remote.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(i, 'utf-8'))
return 0
if self.path == '/android-chrome-36x36.png':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_img(resource_path + 'android-chrome-36x36.png')
self.wfile.write(bytes(i))
return 0
if self.path == '/av-receiver-icon-2.jpg':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_img(resource_path + 'av-receiver-icon-2.jpg')
self.wfile.write(bytes(i))
return 0
if self.path == '/dragon.png':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
i = leer_img(resource_path + 'dragon.png')
self.wfile.write(bytes(i))
return 0
if self.path == '/xnoppo_config':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/xnoppo_config_lib':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
carga_libraries(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/xnoppo_config_dev':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
get_devices(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/check_version':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = check_version(config)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/update_version':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = update_version(config, vers_path, cwd)
restart()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/get_state':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = get_state()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/restart':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
a = 'Restarting'
self.wfile.write(bytes(a, 'utf-8'))
restart()
if self.path == '/refresh_paths':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
a = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
get_selectableFolders(a)
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/lang':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = cargar_lang(lang_path + config['language'] + separador +
'lang.js')
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path.find('/send_key?') >= 0:
get_data = self.path
print(get_data)
a = len('/send_key?sendkey=')
b = get_data[a:len(get_data)]
print(b)
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
sendnotifyremote(config['Oppo_IP'])
result = check_socket(config)
if b == 'PON':
if result == 0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey('EJT', config)
if config['BRDisc'] == True:
time.sleep(1)
response_data_on = sendremotekey('EJT', config)
time.sleep(1)
response_data6b = getsetupmenu(config)
else:
response_data_on = sendremotekey(b, config)
self.send_response(200)
self.send_header('Content-type', 'text')
self.end_headers()
a = 'ok'
self.wfile.write(bytes(a, 'utf-8'))
return 0
if self.path == '/log.txt':
self.send_response(200)
self.send_header('Content-type', 'text')
self.end_headers()
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')
self.wfile.write(bytes(a))
return 0
else:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(bytes(
'<html><head><title>https://pythonbasics.org</title></head>',
'utf-8'))
self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))
self.wfile.write(bytes('<body>', 'utf-8'))
self.wfile.write(bytes('<p>This is an example web server.</p>',
'utf-8'))
self.wfile.write(bytes('</body></html>', 'utf-8'))
def do_POST(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
resource_path = (cwd + separador + 'web' + separador + 'resources' +
separador)
html_path = cwd + separador + 'web' + separador
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
lib_path = cwd + separador + 'lib' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/save_config':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
if self.path == '/check_emby':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = test_emby(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
status = get_state()
if status['Playstate'] == 'Not_Connected':
save_config(cwd + separador + 'config.json', config)
emby_wsocket.ws_config = config
restart()
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/check_oppo':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = test_oppo(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/test_path':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
server = json.loads(post_data.decode('utf-8'))
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = test_path(config, server)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(server))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(server), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/navigate_path':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
path_obj = json.loads(post_data.decode('utf-8'))
path = path_obj['path']
config = cargar_config(cwd + separador + 'config.json', tv_path,
av_path, lang_path)
a = navigate_folder(path, config)
a_json = json.dumps(a)
print(len(a_json))
self.send_response(200)
self.send_header('Content-Length', len(a_json))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(a), 'utf-8'))
return 0
if self.path == '/move_tv':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json', config)
move_files(tv_path + config['TV_model'], lib_path)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
restart()
return 0
if self.path == '/move_av':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json', config)
move_files(av_path + config['AV_model'], lib_path)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
restart()
return 0
if self.path == '/get_tv_key':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = get_tv_key(config)
if a == 'OK':
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/tv_test_conn':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = tv_test_conn(config)
if a == 'OK':
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/get_tv_sources':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = get_tv_sources(config)
if a == 'OK':
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
self.send_header('Content-Length', len(config))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
if self.path == '/get_av_sources':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = get_hdmi_list(config)
if a != None:
config['AV_SOURCES'] = a
save_config(cwd + separador + 'config.json', config)
self.send_response(200)
                self.send_header('Content-Length', len(json.dumps(config)))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
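        # POST /tv_test_init: test switching the TV to the configured HDMI
        # input.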
if self.path == '/tv_test_init':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = tv_change_hdmi(config)
if a == 'OK':
self.send_response(200)
                self.send_header('Content-Length', len(json.dumps(config)))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
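        # POST /tv_test_end: test restoring the TV to its previous input.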
if self.path == '/tv_test_end':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = tv_set_prev(config)
if a == 'OK':
self.send_response(200)
                self.send_header('Content-Length', len(json.dumps(config)))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
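        # POST /av_test_on: test the AV receiver's power-on path
        # (av_check_power).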
if self.path == '/av_test_on':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = av_check_power(config)
if a == 'OK':
self.send_response(200)
                self.send_header('Content-Length', len(json.dumps(config)))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
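        # POST /av_test_off: test powering the AV receiver off.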
if self.path == '/av_test_off':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = av_power_off(config)
if a == 'OK':
self.send_response(200)
                self.send_header('Content-Length', len(json.dumps(config)))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
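        # POST /av_test_hdmi: test switching the AV receiver to the
        # configured HDMI input.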
if self.path == '/av_test_hdmi':
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
config = json.loads(post_data.decode('utf-8'))
a = av_change_hdmi(config)
if a == 'OK':
self.send_response(200)
                self.send_header('Content-Length', len(json.dumps(config)))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config), 'utf-8'))
else:
self.send_response(300)
self.send_header('Content-Length', len('ERROR'))
self.send_header('Content-Type', 'text/html')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes('ERROR', 'utf-8'))
return 0
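
# Entry point: resolve paths relative to this file, load configuration and
# language strings, set up logging according to DebugLevel, start the Emby
# websocket client in a background thread, and serve the configuration UI.
#
# Example (assumed usage; the port is hard-coded to 8090 below): the web UI
# posts the full config JSON to the endpoints above and expects it echoed
# back, e.g.
#   curl -X POST --data @config.json http://localhost:8090/tv_test_conn
# A 200 response carries the (possibly updated) config; failures return
# HTTP 300 with the literal body 'ERROR'.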
if __name__ == '__main__':
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador = '\\'
else:
separador = '/'
config_file = cwd + separador + 'config.json'
resource_path = (cwd + separador + 'web' + separador + 'resources' +
separador)
html_path = cwd + separador + 'web' + separador
tv_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'TV' + separador)
av_path = (cwd + separador + 'web' + separador + 'libraries' +
separador + 'AV' + separador)
lib_path = cwd + separador + 'lib' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
config = cargar_config(config_file, tv_path, av_path, lang_path)
logfile = cwd + separador + 'emby_xnoppo_client_logging.log'
lang = cargar_lang(lang_path + config['language'] + separador + 'lang.js')
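    # DebugLevel 0: console only at CRITICAL; 1: INFO to a rotating 50 MB log
    # file; 2: DEBUG to a rotating 5 MB log file (two backups in both cases).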
if config['DebugLevel'] == 0:
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.CRITICAL)
elif config['DebugLevel'] == 1:
rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=
'a', maxBytes=50 * 1024 * 1024, backupCount=2, encoding=None,
delay=0)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.INFO, handlers=[rfh])
elif config['DebugLevel'] == 2:
rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=
'a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None,
delay=0)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.DEBUG, handlers=[rfh]
)
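    # Run the Emby websocket client in a background thread so the HTTP
    # configuration UI below stays responsive.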
emby_wsocket = xnoppo_ws()
emby_wsocket.ws_config = config
emby_wsocket.config_file = config_file
emby_wsocket.ws_lang = lang
x = threading.Thread(target=thread_function, args=(emby_wsocket,))
x.start()
espera = 0
estado_anterior = ''
    logging.debug('Starting the web server\n')
serverPort = 8090
webServer = HTTPServer(('', serverPort), MyServer)
    print('Server started on http://0.0.0.0:%s' % serverPort)
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
    logging.info('Process finished')
print('Server stopped.')
self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 
'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = 
int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = 
self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<mask token>\n",
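
# A minimal sketch of a client for the handlers above. The host and port
# are illustrative assumptions (the real listen port is set wherever the
# server is started), and the posted config is abbreviated.
def demo_client(host='127.0.0.1', port=8090):
    import http.client
    conn = http.client.HTTPConnection(host, port)
    conn.request('GET', '/get_state')
    print(conn.getresponse().read())  # JSON status from get_state()
    body = json.dumps({'Oppo_IP': '192.168.1.50'})  # hypothetical player IP
    conn.request('POST', '/check_oppo', body=body)
    resp = conn.getresponse()
    print(resp.status, resp.read())  # 200 + config JSON, or 300 + b'ERROR'
    conn.close()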
"step-2": "<mask token>\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n 
sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n 
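
# Worked example for get_mount_path() above, with a hypothetical mapping
# {'Emby_Path': '/mnt/media', 'Oppo_Path': '/NAS/Media'}: the Emby file
# '/mnt/media/Movies/Film (2020)/film.mkv' becomes
# '/NAS/Media/Movies/Film (2020)/film.mkv' and is split into
#     {'Servidor': 'NAS',
#      'Carpeta': 'Media/Movies/Film (2020)',
#      'Fichero': 'film.mkv'}
# i.e. the first path component is the share host, the last is the file,
# and everything in between is the folder the Oppo is asked to mount.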

def get_dir_folders(directory):
    os.chdir(directory)
    list_dir = []
    for x in os.listdir('.'):
        if os.path.isdir(x):
            list_dir.append(x)
    return list_dir


def move_files(src, dest):
    # Copy every regular file in src into dest (activating a TV/AV library).
    os.chdir(src)
    for file_name in os.listdir('.'):
        full_file_name = os.path.join(src, file_name)
        if os.path.isfile(full_file_name):
            shutil.copy(full_file_name, dest)
    return 0


def get_devices(config):
    try:
        EmbySession = EmbyHttp(config)
        devices = EmbySession.get_emby_devices()
        dev_temp = []
        for device in devices['Items']:
            try:
                if device['Id'] != 'Xnoppo':
                    device['Name'] = device['Name'] + ' / ' + device['AppName']
                    device['Id'] = device['ReportedDeviceId']
                    dev_temp.append(device)
            except Exception:
                pass
        config['devices'] = dev_temp
        return 'OK'
    except Exception:
        return 'FAILURE'
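
# check_version() above compares versions as plain strings, which breaks
# once the numbering changes width ('2.01' < '10.0' is False as text).
# A safer comparison, sketched as an optional helper, parses the numeric
# fields first:
def version_tuple(v):
    # '2.01' -> (2, 1); non-numeric fields would need extra handling.
    return tuple(int(part) for part in v.split('.'))

# version_tuple('2.01') < version_tuple('10.0')  ->  True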
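
# get_dir_folders() above chdir()s into the directory, changing the
# process-wide working directory as a side effect. An equivalent that
# leaves the cwd alone (a sketch, not wired into the handlers):
def get_dir_folders_no_chdir(directory):
    return [entry.name for entry in os.scandir(directory) if entry.is_dir()]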
"step-3": "<mask token>\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n 
sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n 
servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n 
i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador 
+\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n 
self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<mask token>\n",
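A note on the check_version helper above: it decides whether an update exists with "if xno_version < last_version:", a plain string comparison, which orders '2.10' before '2.9' because characters are compared one by one rather than numerically. A minimal sketch of a safer comparison, assuming the 'major.minor' format returned by get_version; the parse_version helper is hypothetical, not part of the original code:

def parse_version(v):
    # Split 'major.minor' into a tuple of ints so each component is
    # compared numerically instead of lexicographically.
    return tuple(int(part) for part in v.split('.'))

# String comparison misorders releases once a component reaches two digits:
assert '2.10' < '2.9'
# Tuple comparison orders them correctly:
assert parse_version('2.10') > parse_version('2.9')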
"step-4": "import http.server\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport json\nimport io\nimport urllib\nimport requests\nfrom lib.Emby_ws import xnoppo_ws\nfrom lib.Emby_http import *\nfrom lib.Xnoppo import *\nfrom lib.Xnoppo_TV import *\nimport lib.Xnoppo_AVR\nimport shutil\nimport asyncio\nimport threading\nimport logging\nimport logging.handlers\nimport psutil\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n 
if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result 
> 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + 
str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n 
self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', 
tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == 
'/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n 
return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\nif __name__ == '__main__':\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n config_file = cwd + separador + 'config.json'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n config = cargar_config(config_file, tv_path, av_path, lang_path)\n logfile = cwd + separador + 'emby_xnoppo_client_logging.log'\n lang = cargar_lang(lang_path + config['language'] + separador + 'lang.js')\n if config['DebugLevel'] == 0:\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.CRITICAL)\n elif config['DebugLevel'] == 1:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=50 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n 
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.INFO, handlers=[rfh])\n elif config['DebugLevel'] == 2:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.DEBUG, handlers=[rfh]\n )\n emby_wsocket = xnoppo_ws()\n emby_wsocket.ws_config = config\n emby_wsocket.config_file = config_file\n emby_wsocket.ws_lang = lang\n x = threading.Thread(target=thread_function, args=(emby_wsocket,))\n x.start()\n espera = 0\n estado_anterior = ''\n logging.debug('Arrancamos el Servidor Web\\n')\n serverPort = 8090\n webServer = HTTPServer(('', serverPort), MyServer)\n print('Server started http://%s:%s' % ('', serverPort))\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n webServer.server_close()\n logging.info('Fin proceso')\n logging.info('Finished')\n print('Server stopped.')\n",
"step-5": "import http.server\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport json\nimport io\nimport urllib\nimport requests\nfrom lib.Emby_ws import xnoppo_ws\nfrom lib.Emby_http import *\nfrom lib.Xnoppo import *\nfrom lib.Xnoppo_TV import *\nimport lib.Xnoppo_AVR\nimport shutil\nimport asyncio\nimport threading\nimport logging\nimport logging.handlers\nimport psutil\n\ndef get_version():\n return(\"2.01\")\n\ndef thread_function(ws_object):\n print(\"Thread: starting\")\n ws_object.start()\n print(\"Thread: finishing\")\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n \ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config=config\n emby_wsocket.EmbySession.config=config\n except:\n emby_wsocket.ws_config=config\ndef get_state():\n status={}\n status[\"Version\"]=get_version()\n try:\n status[\"Playstate\"]=emby_wsocket.EmbySession.playstate\n status[\"playedtitle\"]=emby_wsocket.EmbySession.playedtitle\n status[\"server\"]=emby_wsocket.EmbySession.server\n status[\"folder\"]=emby_wsocket.EmbySession.folder\n status[\"filename\"]=emby_wsocket.EmbySession.filename\n status[\"CurrentData\"]=emby_wsocket.EmbySession.currentdata\n # gives a single float value\n except:\n status[\"Playstate\"]=\"Not_Connected\"\n status[\"playedtitle\"]=\"\"\n status[\"server\"]=\"\"\n status[\"folder\"]=\"\"\n status[\"filename\"]=\"\"\n status[\"CurrentData\"]=\"\"\n status[\"cpu_perc\"]=psutil.cpu_percent()\n status[\"mem_perc\"]=psutil.virtual_memory().percent\n \n # you can have the percentage of used RAM\n print(psutil.virtual_memory().percent)\n\n\n print(status)\n return(status)\n\ndef cargar_config(config_file,tv_path,av_path,lang_path):\n\n with open(config_file, 'r') as f: \n config = json.load(f)\n #ver_configuracion(config)\n f.close\n ## new options default config values\n config[\"Version\"]=get_version()\n default = config.get(\"Autoscript\", False)\n config[\"Autoscript\"]=default\n default = config.get(\"enable_all_libraries\", False)\n config[\"enable_all_libraries\"]=default\n default = config.get(\"TV_model\", \"\")\n config[\"TV_model\"]=default\n default = config.get(\"TV_SOURCES\", [])\n config[\"TV_SOURCES\"] = default\n default = config.get(\"AV_model\", \"\")\n config[\"AV_model\"]=default\n default = config.get(\"AV_SOURCES\", [])\n config[\"AV_SOURCES\"] = default\n default = config.get(\"TV_script_init\", \"\")\n config[\"TV_script_init\"]=default\n default = config.get(\"TV_script_end\", \"\")\n config[\"TV_script_end\"]=default\n default = config.get(\"av_delay_hdmi\", 0)\n config[\"av_delay_hdmi\"]=default\n default = config.get(\"AV_Port\", 23)\n config[\"AV_Port\"]=default\n default = config.get(\"timeout_oppo_mount\", 60)\n config[\"timeout_oppo_mount\"]=default\n default = config.get(\"language\",\"es-ES\")\n config[\"language\"]=default\n default = config.get(\"default_nfs\",False)\n config[\"default_nfs\"]=default\n default = config.get(\"wait_nfs\",False)\n config[\"wait_nfs\"]=default\n default = config.get(\"refresh_time\",5)\n config[\"refresh_time\"]=default\n default = config.get(\"check_beta\",False)\n config[\"check_beta\"]=default\n default = config.get(\"smbtrick\",False)\n config[\"smbtrick\"]=default\n default = config.get(\"BRDisc\",False)\n config[\"BRDisc\"]=default\n\n ## testeado de rutas\n edit_server=0\n server_list = 
config[\"servers\"]\n for server in server_list:\n default = server.get(\"Test_OK\", False)\n server_list[edit_server][\"Test_OK\"]=default\n edit_server=edit_server+1\n ## Cambio de booleans de texto antiguos a boleans actuales.\n if config[\"TV\"]=='True':\n config[\"TV\"]=True;\n if config[\"TV\"]=='False':\n config[\"TV\"]=False;\n if config[\"AV\"]=='True':\n config[\"AV\"]=True;\n if config[\"AV\"]=='False':\n config[\"AV\"]=False;\n config[\"servers\"]=server_list\n config[\"tv_dirs\"]=get_dir_folders(tv_path);\n config[\"av_dirs\"]=get_dir_folders(av_path);\n config[\"langs\"]=get_dir_folders(lang_path);\n\n return(config)\n\ndef check_version(config):\n\n url = \"https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js\"\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config[\"check_beta\"])\n if config[\"check_beta\"]==True:\n last_version=version[\"beta_version\"]\n last_version_file=version[\"beta_version_file\"]\n else:\n last_version=version[\"curr_version\"]\n last_version_file=version[\"curr_version_file\"]\n xno_version=get_version()\n resp = {}\n resp[\"version\"]=last_version\n resp[\"file\"]=last_version_file\n print(xno_version)\n print(last_version)\n if xno_version<last_version:\n resp[\"new_version\"]=True\n else:\n resp[\"new_version\"]=False\n print(resp)\n return(resp)\n\ndef update_version(config,vers_path,cwd):\n\n url = \"https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js\"\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config[\"check_beta\"]==True:\n last_version=version[\"beta_version\"]\n last_version_file=version[\"beta_version_file\"]\n else:\n last_version=version[\"curr_version\"]\n last_version_file=version[\"curr_version_file\"]\n url2 = \"https://github.com/siberian-git/Xnoppo/raw/main/versions/\" + last_version_file\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename=vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n if config[\"TV\"]==True and config[\"TV_model\"]!=\"\":\n move_files(tv_path + config[\"TV_model\"],lib_path)\n if config[\"AV\"]==True and config[\"AV_model\"]!=\"\":\n move_files(av_path + config[\"AV_model\"],lib_path)\n resp = {}\n resp[\"version\"]=last_version\n resp[\"file\"]=last_version_file\n resp[\"new_version\"]=False\n return(resp)\n\ndef cargar_lang(config_file):\n\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',encoding='latin-1') as f: \n config = json.load(f)\n #ver_configuracion(config)\n f.close\n ## new options default config values\n return(config)\n\ndef leer_file(web_file):\n\n with open(web_file, 'r',encoding='utf8') as f:\n num=f.read()\n f.close\n return(num)\n\ndef leer_img(web_file):\n\n with open(web_file, 'rb') as f:\n num=f.read()\n f.close\n return(num)\n\n\ndef test_path(config,server):\n \n rutas = get_mount_path(server[\"Emby_Path\"] + \"/test.mkv\",server)\n result2 = test_mount_path(config,rutas[\"Servidor\"],rutas[\"Carpeta\"])\n return(result2)\n\ndef get_mount_path(movie,server_data):\n\n movie = 
movie.replace(server_data[\"Emby_Path\"],server_data[\"Oppo_Path\"])\n movie = movie.replace('\\\\\\\\','\\\\')\n movie = movie.replace('\\\\','/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio +1 \n final = movie.find(word,inicio,len(movie))\n servidor = movie[inicio:final]\n ultimo=final+1\n result=final+1\n while result > 0:\n ultimo=result+1\n result=movie.find(word,ultimo,len(movie))\n fichero=movie[ultimo:len(movie)]\n final=final+1\n ultimo=ultimo-1\n carpeta=movie[final:ultimo]\n resultado={}\n resultado[\"Servidor\"]=servidor\n resultado[\"Carpeta\"]=carpeta\n resultado[\"Fichero\"]=fichero\n return(resultado)\n\ndef test_mount_path(config,servidor,carpeta):\n sendnotifyremote(config[\"Oppo_IP\"])\n #print(\"Conectando con el OPPO\")\n result=check_socket(config)\n if result==0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"EJT\",config)\n time.sleep(1)\n #print(\"Solicitando montar ruta al OPPO\")\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"QPW\",config)\n device_list=json.loads(response_data6f)\n if config[\"DebugLevel\"]>0: print(device_list)\n nfs=config[\"default_nfs\"]\n for device in device_list[\"devicelist\"]:\n if device[\"name\"].upper()==servidor.upper():\n if device[\"sub_type\"]==\"nfs\":\n nfs=True\n break\n else:\n nfs=False\n break\n if nfs:\n response_login = LoginNFS(config,servidor)\n else:\n response_login = LoginSambaWithOutID(config,servidor)\n if config[\"Always_ON\"]==False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor,carpeta,'','',config)\n else:\n response_mount = mountSharedFolder(servidor,carpeta,'','',config)\n response=json.loads(response_mount)\n #print(response)\n if config[\"Autoscript\"]==True:\n result=umountSharedFolder(config)\n if response[\"success\"]==True:\n a = \"OK\"\n else:\n a = \"FAILURE\" \n return(a)\n else:\n print(\"No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo\")\n\ndef test_emby(config):\n try:\n EmbySession=EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info[\"SessionInfo\"][\"Id\"]!=\"\":\n return(\"OK\")\n else:\n return(\"FAILED\")\n except:\n return(\"FAILED\")\n\ndef test_oppo(config):\n result=check_socket(config)\n if result==0:\n return(\"OK\")\n else:\n return(\"FAILED\")\n\ndef carga_libraries(config):\n try:\n EmbySession=EmbyHttp(config)\n views_list=EmbySession.get_user_views(EmbySession.user_info[\"User\"][\"Id\"])\n libraries = []\n for view in views_list:\n library= {}\n library[\"Name\"]=view[\"Name\"]\n library[\"Id\"]=view[\"Id\"]\n library[\"Active\"]=False\n try:\n lib_list=config[\"Libraries\"]\n except:\n lib_list={}\n for lib in lib_list:\n if lib[\"Id\"]==view[\"Id\"]:\n library[\"Active\"]=lib[\"Active\"]\n libraries.append(library)\n config[\"Libraries\"]=libraries\n return(0)\n except:\n return(1)\ndef is_library_active(config,libraryname):\n for library in config[\"Libraries\"]:\n if library[\"Name\"]==libraryname:\n return(library[\"Active\"])\n return(False)\n\ndef get_selectableFolders(config):\n EmbySession=EmbyHttp(config)\n 
MediaFolders = EmbySession.get_emby_selectablefolders()\n servers=[]\n for Folder in MediaFolders:\n index=1\n active=is_library_active(config,Folder[\"Name\"])\n if config[\"enable_all_libraries\"]==True:\n active=True;\n if active==True:\n for SubFolder in Folder[\"SubFolders\"]: \n server={}\n server[\"Id\"]=SubFolder[\"Id\"]\n if index>1:\n server[\"name\"]=Folder[\"Name\"]+\"(\"+str(index)+\")\"\n else:\n server[\"name\"]=Folder[\"Name\"]\n server[\"Emby_Path\"]=SubFolder[\"Path\"]\n server[\"Oppo_Path\"]=\"/\"\n try:\n serv_list=config[\"servers\"]\n except:\n serv_list={}\n for serv in serv_list:\n if server[\"Emby_Path\"]==serv[\"Emby_Path\"]:\n server[\"name\"]=serv[\"name\"];\n server[\"Oppo_Path\"]=serv[\"Oppo_Path\"];\n server[\"Test_OK\"]=serv[\"Test_OK\"];\n servers.append(server)\n index=index+1\n config[\"servers\"]=servers\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir(\".\")\n encontrado=False\n list_dir=[]\n #a =\"\"\n #list_dir.append(a)\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return(list_dir)\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return(0)\n\ndef get_devices(config):\n try:\n EmbySession=EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index=0\n dev_temp = []\n for device in devices[\"Items\"]:\n try:\n if device[\"Id\"]!='Xnoppo':\n device[\"Name\"]=device[\"Name\"] + \" / \" + device[\"AppName\"]\n device[\"Id\"]=device[\"ReportedDeviceId\"]\n dev_temp.append(device)\n except:\n pass\n config[\"devices\"]=dev_temp\n return('OK')\n except:\n return('FAILURE')\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n \n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n 
self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\")) \n return(0)\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = update_version(config,vers_path,cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header(\"Content-type\", 
\"application/json\")\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/restart':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n a = \"Restarting\"\n self.wfile.write(bytes(a,\"utf-8\"))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/lang':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = cargar_lang(lang_path + config[\"language\"] + separador +'lang.js')\n self.wfile.write(bytes(json.dumps(a),\"utf-8\")) \n return(0)\n if self.path.find(\"/send_key?\")>=0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b=get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n sendnotifyremote(config[\"Oppo_IP\"])\n result=check_socket(config)\n if b=='PON':\n if result==0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"EJT\",config)\n if config[\"BRDisc\"]==True:\n time.sleep(1)\n response_data_on = sendremotekey(\"EJT\",config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b,config)\n self.send_response(200)\n self.send_header(\"Content-type\", \"text\")\n self.end_headers()\n a = \"ok\"\n self.wfile.write(bytes(a,\"utf-8\")) \n return(0)\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a)) \n return(0)\n else:\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(\"<html><head><title>https://pythonbasics.org</title></head>\", \"utf-8\"))\n self.wfile.write(bytes(\"<p>Request: %s</p>\" % self.path, \"utf-8\"))\n self.wfile.write(bytes(\"<body>\", \"utf-8\"))\n self.wfile.write(bytes(\"<p>This is an example web server.</p>\", \"utf-8\"))\n self.wfile.write(bytes(\"</body></html>\", \"utf-8\"))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n \n print(self.path)\n if self.path == '/save_config':\n content_length = 
int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n status = get_state()\n if status[\"Playstate\"]==\"Not_Connected\":\n save_config(cwd + separador + 'config.json',config)\n emby_wsocket.ws_config=config\n restart()\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = test_path(config,server)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(server))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj[\"path\"]\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = navigate_folder(path,config)\n a_json=json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header(\"Content-Length\", len(a_json))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n move_files(tv_path + config[\"TV_model\"],lib_path)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n restart()\n return(0)\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n move_files(av_path + config[\"AV_model\"],lib_path)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n restart()\n return(0)\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = 
tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_hdmi_list(config)\n if a != None:\n config[\"AV_SOURCES\"]=a\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 
'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\nif __name__ == \"__main__\":\n\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n config_file = cwd + separador + \"config.json\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n config = cargar_config(config_file,tv_path,av_path,lang_path)\n logfile=cwd + separador + \"emby_xnoppo_client_logging.log\"\n lang = cargar_lang(lang_path + config[\"language\"] + separador +'lang.js')\n\n if config[\"DebugLevel\"]==0:\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.CRITICAL)\n elif config[\"DebugLevel\"]==1:\n rfh = logging.handlers.RotatingFileHandler(\n filename=logfile, \n mode='a',\n maxBytes=50*1024*1024,\n backupCount=2,\n encoding=None,\n delay=0\n )\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.INFO,handlers=[rfh])\n elif config[\"DebugLevel\"]==2:\n rfh = logging.handlers.RotatingFileHandler(\n filename=logfile, \n mode='a',\n maxBytes=5*1024*1024,\n backupCount=2,\n encoding=None,\n delay=0\n )\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.DEBUG,handlers=[rfh])\n emby_wsocket = xnoppo_ws()\n emby_wsocket.ws_config=config\n emby_wsocket.config_file=config_file\n emby_wsocket.ws_lang=lang\n x = threading.Thread(target=thread_function, args=(emby_wsocket,))\n x.start()\n espera=0\n estado_anterior=''\n\n logging.debug('Arrancamos el Servidor Web\\n')\n serverPort = 8090\n webServer = HTTPServer((\"\", serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (\"\", serverPort))\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n webServer.server_close()\n logging.info('Fin proceso')\n logging.info('Finished')\n print(\"Server stopped.\")\n",
"step-ids": [
21,
24,
25,
27,
28
]
}
|
[
21,
24,
25,
27,
28
] |
from joecceasy import Easy


def main():
    paths = ['..', '.']
    absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]
    for i in absOfEntries:
        print(i)


if __name__ == '__main__':
    main()
"""
def main(maxEntries = 99):
i = -1
print( "Walker test, Walking current directory:" )
for entry in Easy.WalkAnIter( ['.'] ):
i += 1 ## because i start at -1, 1st run of line will be 0
if i > maxEntries:
break
print(entry.abs)
print( ' \n ' )
"""
#isFileByPython = os.path.isfile(entry.abs)
# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,
# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, se#p=' ')
#end='' )
#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )
#print( entry.__dict__ )
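# Editor's sketch: a stdlib-only rough equivalent of the walk above, assuming
# Easy.WalkAnIter yields entries whose .abs attribute is an absolute path
# (an assumption; the joecceasy API is not documented in this source).
def walk_abs_stdlib(paths):
    import os
    for top in paths:
        for root, dirs, files in os.walk(top):
            for name in dirs + files:
                print(os.path.abspath(os.path.join(root, name)))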
|
flexible
|
{
"blob_id": "b720a52f1c2e6e6be7c0887cd94441d248382242",
"index": 1836,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-4": "from joecceasy import Easy\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-5": "from joecceasy import Easy\r\n\r\ndef main():\r\n \r\n paths = ['..','.']\r\n absOfEntries = [ i.abs for i in Easy.WalkAnIter(paths) ]\r\n for i in absOfEntries:\r\n print( i )\r\n \r\nif __name__=='__main__':\r\n main()\r\n \r\n \r\n\"\"\"\r\ndef main(maxEntries = 99):\r\n i = -1\r\n print( \"Walker test, Walking current directory:\" )\r\n for entry in Easy.WalkAnIter( ['.'] ):\r\n i += 1 ## because i start at -1, 1st run of line will be 0\r\n if i > maxEntries:\r\n break\r\n print(entry.abs)\r\n print( ' \\n ' )\r\n\"\"\"\r\n\r\n#isFileByPython = os.path.isfile(entry.abs)\r\n# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,\r\n# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, se#p=' ')\r\n#end='' )\r\n#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )\r\n#print( entry.__dict__ )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
def solution(X, Y, D):
# write your code in Python 3.6
    xy = Y - X
    if xy == 0:
        return 0
jumps = math.ceil(xy/D)
return jumps
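# Editor's sketch with illustrative values (not part of the original task):
# from X=10 toward Y=85 with jumps of D=30, ceil(75/30) = 3 jumps are needed.
assert solution(10, 85, 30) == 3
assert solution(5, 5, 7) == 0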
|
normal
|
{
"blob_id": "bdf819d8a5bc3906febced785c6d95db7dc3a603",
"index": 2376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(X, Y, D):\n xy = Y - X\n if xy == 0:\n return 0\n jumps = math.ceil(xy / D)\n return jumps\n",
"step-3": "import math\n\n\ndef solution(X, Y, D):\n xy = Y - X\n if xy == 0:\n return 0\n jumps = math.ceil(xy / D)\n return jumps\n",
"step-4": "import math\ndef solution(X, Y, D):\n # write your code in Python 3.6\n xy = Y-X;\n if xy == 0: return 0\n jumps = math.ceil(xy/D)\n return jumps\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from z3 import *
import re
dna = re.compile(r"dna_(\d+)")
opt = Optimize()
opt.from_file("../benchmarks/bench.smt2")
set_option("sat.random_seed",23)
def get_soft(soft):
return [f.arg(0) for f in soft.children()]
def free_vars(fs):
seen = set([])
vars = set([])
def fv(seen, vars, f):
if f in seen:
return
seen |= { f }
if f.decl().kind() == Z3_OP_UNINTERPRETED:
vars |= { f }
for c in f.children():
fv(seen, vars, c)
for f in fs:
fv(seen, vars, f)
return vars
def is_dna(f):
return f.decl().name().startswith("dna")
def dna_num(f):
m = dna.search(f.decl().name())
assert m
return int(m.group(1))
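# Editor's note (illustrative): dna_num(Bool('dna_42')) evaluates to 42 and
# is_dna(Bool('dna_42')) is True, per the prefix check and regex above.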
def split_fmls(fmls):
A = []
B = []
C = []
varsA = set([])
varsB = set([])
min_dna = 100000
max_dna = 0
for f in fmls:
vars = free_vars([f])
for v in vars:
if is_dna(v):
n = dna_num(v)
if n < min_dna:
min_dna = n
if n > max_dna:
max_dna = n
print(min_dna, max_dna)
mid = (max_dna + min_dna) / 2
print("Mid: ", mid)
for f in fmls:
vars = free_vars([f])
above = False
below = False
for v in vars:
if is_dna(v):
n = dna_num(v)
if n > mid:
above = True
else:
below = True
if not above and not below:
C.append((f, vars))
continue
if below:
A.append(f)
varsA |= vars
if above:
B.append(f)
varsB |= vars
for (f, vars) in C:
for v in vars:
if v in varsA:
A.append(f)
break
for v in vars:
if v in varsB:
B.append(f)
break
# print(A)
# print(B)
return A, B
def split_opt(opt):
soft = opt.objectives()[0]
fmls = opt.assertions()
A, B = split_fmls(opt.assertions())
varsA = free_vars(A)
varsB = free_vars(B)
soft_fmls = get_soft(soft)
shared_vars = { v for v in varsA if v in varsB }
optA = Optimize()
optB = Optimize()
optA.add(A)
optB.add(B)
for s in soft_fmls:
vars = free_vars([s])
for v in vars:
if v in varsA:
optA.add_soft(s)
break
for v in vars:
if v in varsB:
optB.add_soft(s)
break
return optA, optB, shared_vars
set_option(verbose=1)
def split_rec(opt, depth):
if depth == 0:
opt.check()
mdl = opt.model()
lb = mdl.eval(opt.objectives()[0])
return lb, lb, opt.model()
optA, optB, shared_vars = split_opt(opt)
lbA, ubA, mA = split_rec(optA, depth - 1)
lbB, ubB, mB = split_rec(optB, depth - 1)
mCommon = [ v == mA.eval(v) for v in shared_vars if mA.eval(v).eq(mB.eval(v)) ]
print("Fix common values:", len(mCommon), mCommon)
opt.add(mCommon)
opt.check()
mdl = opt.model()
ub = mdl.eval(opt.objectives()[0])
lb = mdl.eval(lbA + lbB)
print(lb, ub, mdl.eval(ubA + ubB))
return lb, ub, opt.model()
lb, ub, mdl = split_rec(opt, 4)
print(mdl)
print("value: ", mdl.eval(opt.objectives()[0]))
#optA1, optA2, shared_vars2 = split_opt(optA)
#optA.set(enable_lns=True)
#optA1.check()
#mA1 = optA1.model()
#optA2.add([v == mA1.eval(v) for v in shared_vars2])
#optA2.check()
#mA2 = optA2.model()
#for v in shared_vars2:
# print(v, mA1.eval(v), mA2.eval(v))
#optA1.add([v == mA2.eval(v) for v in shared_vars2])
#optA1.check()
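# --- Editor's sketch: a tiny self-contained illustration of the split on
# hypothetical dna_<n> Booleans (not the benchmark instance loaded above).
def _toy_split_demo():
    a, b, c, d = Bools('dna_1 dna_2 dna_3 dna_4')
    fmls = [Or(a, b), Or(b, c), Or(c, d)]
    A, B = split_fmls(fmls)
    # dna numbers run 1..4, so mid = 2.5: Or(a, b) falls wholly below (A),
    # Or(c, d) wholly above (B), and Or(b, c) straddles the midpoint, so it
    # lands in both halves; b and c are exactly the shared variables that
    # split_rec reconciles by fixing values the two sub-models agree on.
    print('A:', A)
    print('B:', B)
# Call _toy_split_demo() to print the partition of the toy formulas.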
|
normal
|
{
"blob_id": "c0d8f2542f9cf9a5097011c61c90073c031d2708",
"index": 9831,
"step-1": "<mask token>\n\n\ndef get_soft(soft):\n return [f.arg(0) for f in soft.children()]\n\n\ndef free_vars(fs):\n seen = set([])\n vars = set([])\n\n def fv(seen, vars, f):\n if f in seen:\n return\n seen |= {f}\n if f.decl().kind() == Z3_OP_UNINTERPRETED:\n vars |= {f}\n for c in f.children():\n fv(seen, vars, c)\n for f in fs:\n fv(seen, vars, f)\n return vars\n\n\ndef is_dna(f):\n return f.decl().name().startswith('dna')\n\n\ndef dna_num(f):\n m = dna.search(f.decl().name())\n assert m\n return int(m.group(1))\n\n\ndef split_fmls(fmls):\n A = []\n B = []\n C = []\n varsA = set([])\n varsB = set([])\n min_dna = 100000\n max_dna = 0\n for f in fmls:\n vars = free_vars([f])\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n < min_dna:\n min_dna = n\n if n > max_dna:\n max_dna = n\n print(min_dna, max_dna)\n mid = (max_dna + min_dna) / 2\n print('Mid: ', mid)\n for f in fmls:\n vars = free_vars([f])\n above = False\n below = False\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n > mid:\n above = True\n else:\n below = True\n if not above and not below:\n C.append((f, vars))\n continue\n if below:\n A.append(f)\n varsA |= vars\n if above:\n B.append(f)\n varsB |= vars\n for f, vars in C:\n for v in vars:\n if v in varsA:\n A.append(f)\n break\n for v in vars:\n if v in varsB:\n B.append(f)\n break\n return A, B\n\n\ndef split_opt(opt):\n soft = opt.objectives()[0]\n fmls = opt.assertions()\n A, B = split_fmls(opt.assertions())\n varsA = free_vars(A)\n varsB = free_vars(B)\n soft_fmls = get_soft(soft)\n shared_vars = {v for v in varsA if v in varsB}\n optA = Optimize()\n optB = Optimize()\n optA.add(A)\n optB.add(B)\n for s in soft_fmls:\n vars = free_vars([s])\n for v in vars:\n if v in varsA:\n optA.add_soft(s)\n break\n for v in vars:\n if v in varsB:\n optB.add_soft(s)\n break\n return optA, optB, shared_vars\n\n\n<mask token>\n\n\ndef split_rec(opt, depth):\n if depth == 0:\n opt.check()\n mdl = opt.model()\n lb = mdl.eval(opt.objectives()[0])\n return lb, lb, opt.model()\n optA, optB, shared_vars = split_opt(opt)\n lbA, ubA, mA = split_rec(optA, depth - 1)\n lbB, ubB, mB = split_rec(optB, depth - 1)\n mCommon = [(v == mA.eval(v)) for v in shared_vars if mA.eval(v).eq(mB.\n eval(v))]\n print('Fix common values:', len(mCommon), mCommon)\n opt.add(mCommon)\n opt.check()\n mdl = opt.model()\n ub = mdl.eval(opt.objectives()[0])\n lb = mdl.eval(lbA + lbB)\n print(lb, ub, mdl.eval(ubA + ubB))\n return lb, ub, opt.model()\n\n\n<mask token>\n",
"step-2": "<mask token>\nopt.from_file('../benchmarks/bench.smt2')\nset_option('sat.random_seed', 23)\n\n\ndef get_soft(soft):\n return [f.arg(0) for f in soft.children()]\n\n\ndef free_vars(fs):\n seen = set([])\n vars = set([])\n\n def fv(seen, vars, f):\n if f in seen:\n return\n seen |= {f}\n if f.decl().kind() == Z3_OP_UNINTERPRETED:\n vars |= {f}\n for c in f.children():\n fv(seen, vars, c)\n for f in fs:\n fv(seen, vars, f)\n return vars\n\n\ndef is_dna(f):\n return f.decl().name().startswith('dna')\n\n\ndef dna_num(f):\n m = dna.search(f.decl().name())\n assert m\n return int(m.group(1))\n\n\ndef split_fmls(fmls):\n A = []\n B = []\n C = []\n varsA = set([])\n varsB = set([])\n min_dna = 100000\n max_dna = 0\n for f in fmls:\n vars = free_vars([f])\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n < min_dna:\n min_dna = n\n if n > max_dna:\n max_dna = n\n print(min_dna, max_dna)\n mid = (max_dna + min_dna) / 2\n print('Mid: ', mid)\n for f in fmls:\n vars = free_vars([f])\n above = False\n below = False\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n > mid:\n above = True\n else:\n below = True\n if not above and not below:\n C.append((f, vars))\n continue\n if below:\n A.append(f)\n varsA |= vars\n if above:\n B.append(f)\n varsB |= vars\n for f, vars in C:\n for v in vars:\n if v in varsA:\n A.append(f)\n break\n for v in vars:\n if v in varsB:\n B.append(f)\n break\n return A, B\n\n\ndef split_opt(opt):\n soft = opt.objectives()[0]\n fmls = opt.assertions()\n A, B = split_fmls(opt.assertions())\n varsA = free_vars(A)\n varsB = free_vars(B)\n soft_fmls = get_soft(soft)\n shared_vars = {v for v in varsA if v in varsB}\n optA = Optimize()\n optB = Optimize()\n optA.add(A)\n optB.add(B)\n for s in soft_fmls:\n vars = free_vars([s])\n for v in vars:\n if v in varsA:\n optA.add_soft(s)\n break\n for v in vars:\n if v in varsB:\n optB.add_soft(s)\n break\n return optA, optB, shared_vars\n\n\nset_option(verbose=1)\n\n\ndef split_rec(opt, depth):\n if depth == 0:\n opt.check()\n mdl = opt.model()\n lb = mdl.eval(opt.objectives()[0])\n return lb, lb, opt.model()\n optA, optB, shared_vars = split_opt(opt)\n lbA, ubA, mA = split_rec(optA, depth - 1)\n lbB, ubB, mB = split_rec(optB, depth - 1)\n mCommon = [(v == mA.eval(v)) for v in shared_vars if mA.eval(v).eq(mB.\n eval(v))]\n print('Fix common values:', len(mCommon), mCommon)\n opt.add(mCommon)\n opt.check()\n mdl = opt.model()\n ub = mdl.eval(opt.objectives()[0])\n lb = mdl.eval(lbA + lbB)\n print(lb, ub, mdl.eval(ubA + ubB))\n return lb, ub, opt.model()\n\n\n<mask token>\nprint(mdl)\nprint('value: ', mdl.eval(opt.objectives()[0]))\n",
"step-3": "<mask token>\ndna = re.compile('dna_(\\\\d+)')\nopt = Optimize()\nopt.from_file('../benchmarks/bench.smt2')\nset_option('sat.random_seed', 23)\n\n\ndef get_soft(soft):\n return [f.arg(0) for f in soft.children()]\n\n\ndef free_vars(fs):\n seen = set([])\n vars = set([])\n\n def fv(seen, vars, f):\n if f in seen:\n return\n seen |= {f}\n if f.decl().kind() == Z3_OP_UNINTERPRETED:\n vars |= {f}\n for c in f.children():\n fv(seen, vars, c)\n for f in fs:\n fv(seen, vars, f)\n return vars\n\n\ndef is_dna(f):\n return f.decl().name().startswith('dna')\n\n\ndef dna_num(f):\n m = dna.search(f.decl().name())\n assert m\n return int(m.group(1))\n\n\ndef split_fmls(fmls):\n A = []\n B = []\n C = []\n varsA = set([])\n varsB = set([])\n min_dna = 100000\n max_dna = 0\n for f in fmls:\n vars = free_vars([f])\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n < min_dna:\n min_dna = n\n if n > max_dna:\n max_dna = n\n print(min_dna, max_dna)\n mid = (max_dna + min_dna) / 2\n print('Mid: ', mid)\n for f in fmls:\n vars = free_vars([f])\n above = False\n below = False\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n > mid:\n above = True\n else:\n below = True\n if not above and not below:\n C.append((f, vars))\n continue\n if below:\n A.append(f)\n varsA |= vars\n if above:\n B.append(f)\n varsB |= vars\n for f, vars in C:\n for v in vars:\n if v in varsA:\n A.append(f)\n break\n for v in vars:\n if v in varsB:\n B.append(f)\n break\n return A, B\n\n\ndef split_opt(opt):\n soft = opt.objectives()[0]\n fmls = opt.assertions()\n A, B = split_fmls(opt.assertions())\n varsA = free_vars(A)\n varsB = free_vars(B)\n soft_fmls = get_soft(soft)\n shared_vars = {v for v in varsA if v in varsB}\n optA = Optimize()\n optB = Optimize()\n optA.add(A)\n optB.add(B)\n for s in soft_fmls:\n vars = free_vars([s])\n for v in vars:\n if v in varsA:\n optA.add_soft(s)\n break\n for v in vars:\n if v in varsB:\n optB.add_soft(s)\n break\n return optA, optB, shared_vars\n\n\nset_option(verbose=1)\n\n\ndef split_rec(opt, depth):\n if depth == 0:\n opt.check()\n mdl = opt.model()\n lb = mdl.eval(opt.objectives()[0])\n return lb, lb, opt.model()\n optA, optB, shared_vars = split_opt(opt)\n lbA, ubA, mA = split_rec(optA, depth - 1)\n lbB, ubB, mB = split_rec(optB, depth - 1)\n mCommon = [(v == mA.eval(v)) for v in shared_vars if mA.eval(v).eq(mB.\n eval(v))]\n print('Fix common values:', len(mCommon), mCommon)\n opt.add(mCommon)\n opt.check()\n mdl = opt.model()\n ub = mdl.eval(opt.objectives()[0])\n lb = mdl.eval(lbA + lbB)\n print(lb, ub, mdl.eval(ubA + ubB))\n return lb, ub, opt.model()\n\n\nlb, ub, mdl = split_rec(opt, 4)\nprint(mdl)\nprint('value: ', mdl.eval(opt.objectives()[0]))\n",
"step-4": "from z3 import *\nimport re\ndna = re.compile('dna_(\\\\d+)')\nopt = Optimize()\nopt.from_file('../benchmarks/bench.smt2')\nset_option('sat.random_seed', 23)\n\n\ndef get_soft(soft):\n return [f.arg(0) for f in soft.children()]\n\n\ndef free_vars(fs):\n seen = set([])\n vars = set([])\n\n def fv(seen, vars, f):\n if f in seen:\n return\n seen |= {f}\n if f.decl().kind() == Z3_OP_UNINTERPRETED:\n vars |= {f}\n for c in f.children():\n fv(seen, vars, c)\n for f in fs:\n fv(seen, vars, f)\n return vars\n\n\ndef is_dna(f):\n return f.decl().name().startswith('dna')\n\n\ndef dna_num(f):\n m = dna.search(f.decl().name())\n assert m\n return int(m.group(1))\n\n\ndef split_fmls(fmls):\n A = []\n B = []\n C = []\n varsA = set([])\n varsB = set([])\n min_dna = 100000\n max_dna = 0\n for f in fmls:\n vars = free_vars([f])\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n < min_dna:\n min_dna = n\n if n > max_dna:\n max_dna = n\n print(min_dna, max_dna)\n mid = (max_dna + min_dna) / 2\n print('Mid: ', mid)\n for f in fmls:\n vars = free_vars([f])\n above = False\n below = False\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n > mid:\n above = True\n else:\n below = True\n if not above and not below:\n C.append((f, vars))\n continue\n if below:\n A.append(f)\n varsA |= vars\n if above:\n B.append(f)\n varsB |= vars\n for f, vars in C:\n for v in vars:\n if v in varsA:\n A.append(f)\n break\n for v in vars:\n if v in varsB:\n B.append(f)\n break\n return A, B\n\n\ndef split_opt(opt):\n soft = opt.objectives()[0]\n fmls = opt.assertions()\n A, B = split_fmls(opt.assertions())\n varsA = free_vars(A)\n varsB = free_vars(B)\n soft_fmls = get_soft(soft)\n shared_vars = {v for v in varsA if v in varsB}\n optA = Optimize()\n optB = Optimize()\n optA.add(A)\n optB.add(B)\n for s in soft_fmls:\n vars = free_vars([s])\n for v in vars:\n if v in varsA:\n optA.add_soft(s)\n break\n for v in vars:\n if v in varsB:\n optB.add_soft(s)\n break\n return optA, optB, shared_vars\n\n\nset_option(verbose=1)\n\n\ndef split_rec(opt, depth):\n if depth == 0:\n opt.check()\n mdl = opt.model()\n lb = mdl.eval(opt.objectives()[0])\n return lb, lb, opt.model()\n optA, optB, shared_vars = split_opt(opt)\n lbA, ubA, mA = split_rec(optA, depth - 1)\n lbB, ubB, mB = split_rec(optB, depth - 1)\n mCommon = [(v == mA.eval(v)) for v in shared_vars if mA.eval(v).eq(mB.\n eval(v))]\n print('Fix common values:', len(mCommon), mCommon)\n opt.add(mCommon)\n opt.check()\n mdl = opt.model()\n ub = mdl.eval(opt.objectives()[0])\n lb = mdl.eval(lbA + lbB)\n print(lb, ub, mdl.eval(ubA + ubB))\n return lb, ub, opt.model()\n\n\nlb, ub, mdl = split_rec(opt, 4)\nprint(mdl)\nprint('value: ', mdl.eval(opt.objectives()[0]))\n",
"step-5": "from z3 import *\nimport re\n\ndna = re.compile(\"dna_(\\d+)\")\n\nopt = Optimize()\nopt.from_file(\"../benchmarks/bench.smt2\")\n\nset_option(\"sat.random_seed\",23)\n\ndef get_soft(soft):\n return [f.arg(0) for f in soft.children()]\n\ndef free_vars(fs):\n seen = set([])\n vars = set([])\n def fv(seen, vars, f):\n if f in seen:\n return\n seen |= { f }\n if f.decl().kind() == Z3_OP_UNINTERPRETED:\n vars |= { f }\n for c in f.children():\n fv(seen, vars, c)\n for f in fs:\n fv(seen, vars, f)\n return vars\n\ndef is_dna(f):\n return f.decl().name().startswith(\"dna\")\n\ndef dna_num(f):\n m = dna.search(f.decl().name())\n assert m\n return int(m.group(1))\n\n\ndef split_fmls(fmls):\n A = []\n B = []\n C = []\n varsA = set([])\n varsB = set([])\n min_dna = 100000\n max_dna = 0\n for f in fmls:\n vars = free_vars([f])\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n < min_dna:\n min_dna = n\n if n > max_dna:\n max_dna = n\n print(min_dna, max_dna)\n mid = (max_dna + min_dna) / 2\n print(\"Mid: \", mid)\n for f in fmls:\n vars = free_vars([f])\n above = False\n below = False\n for v in vars:\n if is_dna(v):\n n = dna_num(v)\n if n > mid:\n above = True\n else:\n below = True\n if not above and not below:\n C.append((f, vars))\n continue\n if below:\n A.append(f)\n varsA |= vars\n if above:\n B.append(f)\n varsB |= vars\n for (f, vars) in C:\n for v in vars:\n if v in varsA:\n A.append(f)\n break\n for v in vars:\n if v in varsB:\n B.append(f)\n break\n# print(A)\n# print(B)\n return A, B\n\ndef split_opt(opt):\n soft = opt.objectives()[0]\n fmls = opt.assertions()\n A, B = split_fmls(opt.assertions())\n varsA = free_vars(A)\n varsB = free_vars(B)\n soft_fmls = get_soft(soft)\n shared_vars = { v for v in varsA if v in varsB }\n\n optA = Optimize()\n optB = Optimize()\n optA.add(A)\n optB.add(B)\n for s in soft_fmls:\n vars = free_vars([s])\n for v in vars:\n if v in varsA:\n optA.add_soft(s)\n break\n for v in vars:\n if v in varsB:\n optB.add_soft(s)\n break\n\n return optA, optB, shared_vars\n\nset_option(verbose=1)\n\ndef split_rec(opt, depth):\n if depth == 0:\n opt.check()\n mdl = opt.model()\n lb = mdl.eval(opt.objectives()[0])\n return lb, lb, opt.model()\n optA, optB, shared_vars = split_opt(opt)\n lbA, ubA, mA = split_rec(optA, depth - 1)\n lbB, ubB, mB = split_rec(optB, depth - 1)\n mCommon = [ v == mA.eval(v) for v in shared_vars if mA.eval(v).eq(mB.eval(v)) ]\n print(\"Fix common values:\", len(mCommon), mCommon)\n opt.add(mCommon)\n opt.check()\n mdl = opt.model()\n ub = mdl.eval(opt.objectives()[0])\n lb = mdl.eval(lbA + lbB)\n print(lb, ub, mdl.eval(ubA + ubB))\n return lb, ub, opt.model()\n\nlb, ub, mdl = split_rec(opt, 4)\nprint(mdl)\nprint(\"value: \", mdl.eval(opt.objectives()[0]))\n\n#optA1, optA2, shared_vars2 = split_opt(optA)\n#optA.set(enable_lns=True)\n#optA1.check()\n#mA1 = optA1.model()\n#optA2.add([v == mA1.eval(v) for v in shared_vars2])\n#optA2.check()\n#mA2 = optA2.model()\n#for v in shared_vars2:\n# print(v, mA1.eval(v), mA2.eval(v))\n#optA1.add([v == mA2.eval(v) for v in shared_vars2])\n#optA1.check()\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('events', '0005_auto_20160207_1529')]
operations = [migrations.AddField(model_name='event', name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),
('N', 'No Skins')], default='N', max_length=1, verbose_name=
'Skins type')), migrations.AddField(model_name='eventtemplate',
name='skins_type', field=models.CharField(choices=[('I',
'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',
max_length=1, verbose_name='Skins type')), migrations.AddField(
model_name='historicalevent', name='skins_type', field=models.
CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',
'No Skins')], default='N', max_length=1, verbose_name='Skins type')
), migrations.AddField(model_name='historicaleventtemplate', name=
'skins_type', field=models.CharField(choices=[('I', 'Individual'),
('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,
verbose_name='Skins type')), migrations.AlterField(model_name=
'event', name='event_type', field=models.CharField(choices=[('L',
'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),
('O', 'Other')], default='M', max_length=1, verbose_name=
'Event type')), migrations.AlterField(model_name='event', name=
'scoring', field=models.CharField(choices=[('IN', 'Individual'), (
'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',
'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',
'Team: Combination')], default='IN', max_length=3, verbose_name=
'Scoring type')), migrations.AlterField(model_name='eventtemplate',
name='event_type', field=models.CharField(choices=[('L', 'League'),
('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',
'Other')], default='M', max_length=1, verbose_name='Event type')),
migrations.AlterField(model_name='eventtemplate', name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB',
'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',
'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',
'Team: Combination')], default='IN', max_length=3, verbose_name=
'Scoring type')), migrations.AlterField(model_name=
'historicalevent', name='event_type', field=models.CharField(
choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',
'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length
=1, verbose_name='Event type')), migrations.AlterField(model_name=
'historicalevent', name='scoring', field=models.CharField(choices=[
('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',
'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',
'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',
max_length=3, verbose_name='Scoring type')), migrations.AlterField(
model_name='historicaleventtemplate', name='event_type', field=
models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),
('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',
max_length=1, verbose_name='Event type')), migrations.AlterField(
model_name='historicaleventtemplate', name='scoring', field=models.
CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),
('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',
'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',
max_length=3, verbose_name='Scoring type'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('events', '0005_auto_20160207_1529')]
operations = [migrations.AddField(model_name='event', name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),
('N', 'No Skins')], default='N', max_length=1, verbose_name=
'Skins type')), migrations.AddField(model_name='eventtemplate',
name='skins_type', field=models.CharField(choices=[('I',
'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',
max_length=1, verbose_name='Skins type')), migrations.AddField(
model_name='historicalevent', name='skins_type', field=models.
CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',
'No Skins')], default='N', max_length=1, verbose_name='Skins type')
), migrations.AddField(model_name='historicaleventtemplate', name=
'skins_type', field=models.CharField(choices=[('I', 'Individual'),
('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,
verbose_name='Skins type')), migrations.AlterField(model_name=
'event', name='event_type', field=models.CharField(choices=[('L',
'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),
('O', 'Other')], default='M', max_length=1, verbose_name=
'Event type')), migrations.AlterField(model_name='event', name=
'scoring', field=models.CharField(choices=[('IN', 'Individual'), (
'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',
'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',
'Team: Combination')], default='IN', max_length=3, verbose_name=
'Scoring type')), migrations.AlterField(model_name='eventtemplate',
name='event_type', field=models.CharField(choices=[('L', 'League'),
('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',
'Other')], default='M', max_length=1, verbose_name='Event type')),
migrations.AlterField(model_name='eventtemplate', name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB',
'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',
'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',
'Team: Combination')], default='IN', max_length=3, verbose_name=
'Scoring type')), migrations.AlterField(model_name=
'historicalevent', name='event_type', field=models.CharField(
choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',
'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length
=1, verbose_name='Event type')), migrations.AlterField(model_name=
'historicalevent', name='scoring', field=models.CharField(choices=[
('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',
'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',
'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',
max_length=3, verbose_name='Scoring type')), migrations.AlterField(
model_name='historicaleventtemplate', name='event_type', field=
models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),
('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',
max_length=1, verbose_name='Event type')), migrations.AlterField(
model_name='historicaleventtemplate', name='scoring', field=models.
CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),
('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',
'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',
max_length=3, verbose_name='Scoring type'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 23:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20160207_1529'),
]
operations = [
migrations.AddField(
model_name='event',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AddField(
model_name='eventtemplate',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AddField(
model_name='historicalevent',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AddField(
model_name='historicaleventtemplate',
name='skins_type',
field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
),
migrations.AlterField(
model_name='event',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='event',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
migrations.AlterField(
model_name='eventtemplate',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='eventtemplate',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
migrations.AlterField(
model_name='historicalevent',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='historicalevent',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
migrations.AlterField(
model_name='historicaleventtemplate',
name='event_type',
field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
),
migrations.AlterField(
model_name='historicaleventtemplate',
name='scoring',
field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
),
]
|
flexible
|
{
"blob_id": "ab3609c27fa002d79735c5d5c09ec7a52fedd040",
"index": 3484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('events', '0005_auto_20160207_1529')]\n operations = [migrations.AddField(model_name='event', name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),\n ('N', 'No Skins')], default='N', max_length=1, verbose_name=\n 'Skins type')), migrations.AddField(model_name='eventtemplate',\n name='skins_type', field=models.CharField(choices=[('I',\n 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',\n max_length=1, verbose_name='Skins type')), migrations.AddField(\n model_name='historicalevent', name='skins_type', field=models.\n CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',\n 'No Skins')], default='N', max_length=1, verbose_name='Skins type')\n ), migrations.AddField(model_name='historicaleventtemplate', name=\n 'skins_type', field=models.CharField(choices=[('I', 'Individual'),\n ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,\n verbose_name='Skins type')), migrations.AlterField(model_name=\n 'event', name='event_type', field=models.CharField(choices=[('L',\n 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),\n ('O', 'Other')], default='M', max_length=1, verbose_name=\n 'Event type')), migrations.AlterField(model_name='event', name=\n 'scoring', field=models.CharField(choices=[('IN', 'Individual'), (\n 'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name='eventtemplate',\n name='event_type', field=models.CharField(choices=[('L', 'League'),\n ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',\n 'Other')], default='M', max_length=1, verbose_name='Event type')),\n migrations.AlterField(model_name='eventtemplate', name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB',\n 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name=\n 'historicalevent', name='event_type', field=models.CharField(\n choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',\n 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length\n =1, verbose_name='Event type')), migrations.AlterField(model_name=\n 'historicalevent', name='scoring', field=models.CharField(choices=[\n ('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',\n 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='event_type', field=\n models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),\n ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',\n max_length=1, verbose_name='Event type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='scoring', field=models.\n CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),\n ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('events', '0005_auto_20160207_1529')]\n operations = [migrations.AddField(model_name='event', name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),\n ('N', 'No Skins')], default='N', max_length=1, verbose_name=\n 'Skins type')), migrations.AddField(model_name='eventtemplate',\n name='skins_type', field=models.CharField(choices=[('I',\n 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',\n max_length=1, verbose_name='Skins type')), migrations.AddField(\n model_name='historicalevent', name='skins_type', field=models.\n CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',\n 'No Skins')], default='N', max_length=1, verbose_name='Skins type')\n ), migrations.AddField(model_name='historicaleventtemplate', name=\n 'skins_type', field=models.CharField(choices=[('I', 'Individual'),\n ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,\n verbose_name='Skins type')), migrations.AlterField(model_name=\n 'event', name='event_type', field=models.CharField(choices=[('L',\n 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),\n ('O', 'Other')], default='M', max_length=1, verbose_name=\n 'Event type')), migrations.AlterField(model_name='event', name=\n 'scoring', field=models.CharField(choices=[('IN', 'Individual'), (\n 'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name='eventtemplate',\n name='event_type', field=models.CharField(choices=[('L', 'League'),\n ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',\n 'Other')], default='M', max_length=1, verbose_name='Event type')),\n migrations.AlterField(model_name='eventtemplate', name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB',\n 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name=\n 'historicalevent', name='event_type', field=models.CharField(\n choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',\n 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length\n =1, verbose_name='Event type')), migrations.AlterField(model_name=\n 'historicalevent', name='scoring', field=models.CharField(choices=[\n ('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',\n 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='event_type', field=\n models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),\n ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',\n max_length=1, verbose_name='Event type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='scoring', field=models.\n CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),\n ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-07 23:42\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('events', '0005_auto_20160207_1529'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='eventtemplate',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='historicalevent',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='historicaleventtemplate',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AlterField(\n model_name='event',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='event',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='eventtemplate',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='eventtemplate',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='historicalevent',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='historicalevent',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='historicaleventtemplate',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='historicaleventtemplate',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], 
default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding=gbk
from numpy import *
import fp_growth
'''
# create a single node of the tree
rootNode=fp_growth.treeNode('pyramid',9,None)
# add a child node to it
rootNode.children['eye']=fp_growth.treeNode('eye',13,None)
rootNode.disp()
# load the example transaction database
simpData=fp_growth.loadSimpData()
#print("simpData:")
#print(simpData)
# format the data into the initial set
initSet=fp_growth.createInitSet(simpData)
#print("initSet:")
#print(initSet)
myFPtree,myHeaderTab=fp_growth.createTree(initSet,3)
#print("myFPtree:")
#print(myFPtree)
#myFPtree.disp()
print("myFPtree:")
#print(myFPtree)
myFPtree.disp()
print("myHeaderTab:")
for item in myHeaderTab.items():
print(item)
path=fp_growth.findPrefixPath('r',myHeaderTab['r'][1])
print("path:")
print(path)
# create an empty list to collect all the frequent itemsets
freqItems=[]
fp_growth.minTree(myFPtree,myHeaderTab,3,set([]),freqItems)
'''
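# Mine the kosarak data (a click-stream dataset) for itemsets with a minimum
# support of 100000, i.e. pages viewed by at least 100k users.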
parsedDat=[line.split() for line in open('kosarak.dat').readlines()]
initSet=fp_growth.createInitSet(parsedDat)
myFPtree,myHeaderTab=fp_growth.createTree(initSet,100000)
myFreqList=[]
fp_growth.minTree(myFPtree,myHeaderTab,100000,set([]),myFreqList)
print(len(myFreqList))
|
normal
|
{
"blob_id": "e8b0e6e5e68933703e2ac8c9b2b62d68c0c2f53d",
"index": 8295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfp_growth.minTree(myFPtree, myHeaderTab, 100000, set([]), myFreqList)\nprint(len(myFreqList))\n",
"step-3": "<mask token>\nparsedDat = [line.split() for line in open('kosarak.dat').readlines()]\ninitSet = fp_growth.createInitSet(parsedDat)\nmyFPtree, myHeaderTab = fp_growth.createTree(initSet, 100000)\nmyFreqList = []\nfp_growth.minTree(myFPtree, myHeaderTab, 100000, set([]), myFreqList)\nprint(len(myFreqList))\n",
"step-4": "from numpy import *\nimport fp_growth\n<mask token>\nparsedDat = [line.split() for line in open('kosarak.dat').readlines()]\ninitSet = fp_growth.createInitSet(parsedDat)\nmyFPtree, myHeaderTab = fp_growth.createTree(initSet, 100000)\nmyFreqList = []\nfp_growth.minTree(myFPtree, myHeaderTab, 100000, set([]), myFreqList)\nprint(len(myFreqList))\n",
"step-5": "# coding=gbk\nfrom numpy import *\n\nimport fp_growth\n\n\n'''\n#创建树的一个单节点\nrootNode=fp_growth.treeNode('pyramid',9,None)\n#为其增加一个子节点\nrootNode.children['eye']=fp_growth.treeNode('eye',13,None)\n\nrootNode.disp()\n\n\n\n#导入事务数据库实例\nsimpData=fp_growth.loadSimpData()\n#print(\"simpData:\")\n#print(simpData)\n\n#对数据进行格式化处理\ninitSet=fp_growth.createInitSet(simpData)\n#print(\"initSet:\")\n#print(initSet)\n\nmyFPtree,myHeaderTab=fp_growth.createTree(initSet,3)\n\n#print(\"myFPtree:\")\n#print(myFPtree)\n#myFPtree.disp()\n\nprint(\"myFPtree:\")\n#print(myFPtree)\nmyFPtree.disp()\n\n\nprint(\"myHeaderTab:\")\nfor item in myHeaderTab.items():\n\tprint(item)\n\t\npath=fp_growth.findPrefixPath('r',myHeaderTab['r'][1])\nprint(\"path:\")\t\nprint(path)\n\n#建立一个空列表来存储所有的频繁项集\nfreqItems=[]\nfp_growth.minTree(myFPtree,myHeaderTab,3,set([]),freqItems)\n\n\n'''\n\nparsedDat=[line.split() for line in open('kosarak.dat').readlines()]\ninitSet=fp_growth.createInitSet(parsedDat)\nmyFPtree,myHeaderTab=fp_growth.createTree(initSet,100000)\nmyFreqList=[]\nfp_growth.minTree(myFPtree,myHeaderTab,100000,set([]),myFreqList)\nprint(len(myFreqList))\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import collect_from_webapi.api_public_data as pdapi
from collect_from_webapi import pd_fetch_tourspot_visitor
# url = pdapi.pd_gen_url("http://openapi.tour.go.kr/openapi/serviceTourismResourceStatsService/getPchrgTrrsrtVisitorList",
# YM='{0:04d}{1:02d}'.format(2017, 1),
# SIDO='서울특별시',
# GUNGU='',
# RES_NM='',
# numOfRows=10,
# _type='json',
# pageNo=1)
# test for pd_fetch_tourspot_visitor
for items in pd_fetch_tourspot_visitor(district='서울특별시', year=2017, month=7):
print(items)
# test for pd_fetch_foreign_visitor()
item = pdapi.pd_fetch_foreign_visitor(112, 2012, 7)  # (code, year, month); 112 is presumably a country/region code
print(item)
|
normal
|
{
"blob_id": "c6a6b8f2485528af479fadbdf286e82f10a11de8",
"index": 9101,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor items in pd_fetch_tourspot_visitor(district='서울특별시', year=2017, month=7):\n print(items)\n<mask token>\nprint(item)\n",
"step-3": "<mask token>\nfor items in pd_fetch_tourspot_visitor(district='서울특별시', year=2017, month=7):\n print(items)\nitem = pdapi.pd_fetch_foreign_visitor(112, 2012, 7)\nprint(item)\n",
"step-4": "import collect_from_webapi.api_public_data as pdapi\nfrom collect_from_webapi import pd_fetch_tourspot_visitor\nfor items in pd_fetch_tourspot_visitor(district='서울특별시', year=2017, month=7):\n print(items)\nitem = pdapi.pd_fetch_foreign_visitor(112, 2012, 7)\nprint(item)\n",
"step-5": "import collect_from_webapi.api_public_data as pdapi\nfrom collect_from_webapi import pd_fetch_tourspot_visitor\n\n# url = pdapi.pd_gen_url(\"http://openapi.tour.go.kr/openapi/serviceTourismResourceStatsService/getPchrgTrrsrtVisitorList\",\n# YM='{0:04d}{1:02d}'.format(2017, 1),\n# SIDO='서울특별시',\n# GUNGU='',\n# RES_NM='',\n# numOfRows=10,\n# _type='json',\n# pageNo=1)\n\n\n# test for pd_fetch_tourspot_visitor\nfor items in pd_fetch_tourspot_visitor(district='서울특별시', year=2017, month=7):\n print(items)\n\n\n# test for pd_fetch_tourspot_visitor()\n\nitem = pdapi.pd_fetch_foreign_visitor(112, 2012, 7)\nprint(item)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Paulie Jo Gonzalez
CS 4375 - os
Lab 0
Last modified: 02/14/2021
This code includes a reference to C code for my_getChar method provided by Dr. Freudenthal.
'''
from os import read
next_c = 0
limit = 0
def get_char():
    global next_c, limit
    # refill the buffer when it is empty or fully consumed
    if limit == 0 or next_c == len(limit):
        next_c = 0
        limit = read(0, 100)  # read up to 100 bytes from stdin (fd 0)
        if len(limit) == 0:  # read() returned b'': end of input
            return ''
    ch = chr(limit[next_c])  # convert the byte value to a 1-char string
    next_c += 1
    return ch
def my_read_line():
    global next_c, limit
    line = ''
    ch = get_char()
    # build the line one character at a time
    while ch != '\n':
        line += ch
        ch = get_char()
        if ch == '':
            return line  # EOF reached before a newline
    next_c = 0  # discard any leftover buffered input (assumes line-buffered stdin)
    limit = 0
    line += '\n'
    return line
# def my_read_lines():
# num_lines = 0
# in_line = my_read_line() # read line
# while len(in_line):
# num_lines += 1
# print(f'###line {num_lines}: <{str(in_line)}> ###\n')
#         in_line = my_read_line()
# print(f'eof after {num_lines}\n')
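# Minimal usage sketch (not part of the original lab file): echo the first
# line typed on stdin using the buffered reader above.
if __name__ == '__main__':
    print(my_read_line(), end='')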
|
normal
|
{
"blob_id": "67ac5d82bc37b67cfdae73b6667b73b70ed33cfb",
"index": 8868,
"step-1": "<mask token>\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-3": "<mask token>\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-4": "<mask token>\nfrom os import read\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-5": "'''\nPaulie Jo Gonzalez\nCS 4375 - os\nLab 0\nLast modified: 02/14/2021\nThis code includes a reference to C code for my_getChar method provided by Dr. Freudenthal.\n'''\n\nfrom os import read\n\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n\n if next_c == limit:\n next_c = 0\n limit = read(0, 100) # allocate bytes\n\n if limit == 0:\n return ''\n\n if next_c >= len(limit) - 1: # check upperbound\n return ''\n ch = chr(limit[next_c]) # convert to char (from ASCII)\n next_c += 1\n\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n\n line = ''\n ch = get_char()\n\n # get each char of line\n while (ch != '\\n'): # while char is not new line\n line += ch # build line\n ch = get_char()\n if ch == '':\n return line # EOF\n\n next_c = 0 # reset next_c and limit after line is read\n limit = 0\n line += '\\n'\n\n return line\n\n\n# def my_read_lines():\n# num_lines = 0\n# in_line = my_read_line() # read line\n\n# while len(in_line):\n# num_lines += 1\n# print(f'###line {num_lines}: <{str(in_line)}> ###\\n')\n\n# in_line = my_read_lines()\n# print(f'eof after {num_lines}\\n')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MagicBoxDHT22(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MagicBoxDHT22(object):
<|reserved_special_token_0|>
def __init__(self):
self.pi = pigpio.pi()
self.s = DHT22.sensor(self.pi, 4)
self.tempF = 0
self.humidity = 0
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MagicBoxDHT22(object):
def DHT22(self):
self.s.trigger()
time.sleep(0.2)
self.tempF = round(self.s.temperature() * 1.8 + 32, 2) - 3.7
self.humidity = round(self.s.humidity())
def __init__(self):
self.pi = pigpio.pi()
self.s = DHT22.sensor(self.pi, 4)
self.tempF = 0
self.humidity = 0
<|reserved_special_token_1|>
import time
import DHT22
import pigpio
import Sensor
class MagicBoxDHT22(object):
def DHT22(self):
self.s.trigger()
time.sleep(0.2)
self.tempF = round(self.s.temperature() * 1.8 + 32, 2) - 3.7
self.humidity = round(self.s.humidity())
def __init__(self):
self.pi = pigpio.pi()
self.s = DHT22.sensor(self.pi, 4)
self.tempF = 0
self.humidity = 0
<|reserved_special_token_1|>
import time
import DHT22
import pigpio
import Sensor
class MagicBoxDHT22(object):
    def DHT22(self):
        self.s.trigger()
        time.sleep(0.2)
        self.tempF = round(self.s.temperature()*1.8 + 32, 2) - 3.7  # C -> F, minus a 3.7 degree calibration adjustment
        self.humidity = round(self.s.humidity())

    def __init__(self):
        self.pi = pigpio.pi()
        self.s = DHT22.sensor(self.pi, 4)  # DHT22 data line on GPIO 4
        self.tempF = 0
        self.humidity = 0
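# Minimal usage sketch (assumes a DHT22 wired to GPIO 4 and the pigpio
# daemon running):
#   box = MagicBoxDHT22()
#   box.DHT22()
#   print(box.tempF, box.humidity)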
|
flexible
|
{
"blob_id": "179b07870d656fb24b73d8b0a1f76ffed08aa5c2",
"index": 9665,
"step-1": "<mask token>\n\n\nclass MagicBoxDHT22(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MagicBoxDHT22(object):\n <mask token>\n\n def __init__(self):\n self.pi = pigpio.pi()\n self.s = DHT22.sensor(self.pi, 4)\n self.tempF = 0\n self.humidity = 0\n",
"step-3": "<mask token>\n\n\nclass MagicBoxDHT22(object):\n\n def DHT22(self):\n self.s.trigger()\n time.sleep(0.2)\n self.tempF = round(self.s.temperature() * 1.8 + 32, 2) - 3.7\n self.humidity = round(self.s.humidity())\n\n def __init__(self):\n self.pi = pigpio.pi()\n self.s = DHT22.sensor(self.pi, 4)\n self.tempF = 0\n self.humidity = 0\n",
"step-4": "import time\nimport DHT22\nimport pigpio\nimport Sensor\n\n\nclass MagicBoxDHT22(object):\n\n def DHT22(self):\n self.s.trigger()\n time.sleep(0.2)\n self.tempF = round(self.s.temperature() * 1.8 + 32, 2) - 3.7\n self.humidity = round(self.s.humidity())\n\n def __init__(self):\n self.pi = pigpio.pi()\n self.s = DHT22.sensor(self.pi, 4)\n self.tempF = 0\n self.humidity = 0\n",
"step-5": "import time\nimport DHT22\nimport pigpio\nimport Sensor\n\nclass MagicBoxDHT22(object):\n\n def DHT22(self):\n self.s.trigger()\n time.sleep(0.2)\n self.tempF=round(self.s.temperature()*1.8+32,2) -3.7 #+adjustment\n self.humidity=round(self.s.humidity())\n\n def __init__(self):\n self.pi=pigpio.pi()\n self.s=DHT22.sensor(self.pi, 4)\n self.tempF=0\n self.humidity=0\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def run():
torch.manual_seed(1729)
""" Setup """
args = parse()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
""" Dataset """
train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)
test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)
train_loader = DataLoader(dataset=train_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
test_loader = DataLoader(dataset=test_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
""" TCN """
tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,
out_channels=args.out_channels, kernel_size=args.kernel_size,
residual_blocks_channel_size=[args.res_block_size] * args.
num_layers, bias=args.bias, dropout=args.dropout, stride=args.
stride, dilations=None, leveledinit=args.leveledinit)
tcn.to(device)
if args.print:
print(
f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'
)
""" Training parameters"""
criterion = nn.MSELoss()
optimizer = optim.Adam(tcn.parameters(), lr=args.lr)
""" Tensorboard """
writer = SummaryWriter(args.writer_path)
for ep in range(1, args.epochs + 1):
""" TRAIN """
tcn.train()
total_loss = 0
for i, data in enumerate(train_loader):
x, y = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
output = tcn(x)
loss = F.mse_loss(output, y)
loss.backward()
if args.clip > 0:
                torch.nn.utils.clip_grad_norm_(tcn.parameters(), args.clip)
optimizer.step()
total_loss += loss.item()
if i % args.log_interval == 0:
cur_loss = total_loss / args.log_interval
processed = min(i * args.batch_size, args.N_train)
writer.add_scalar('training_loss', cur_loss, processed)
if args.print:
print(
f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\tLearning rate: {args.lr:.4f}\tLoss: {cur_loss:.6f}'
)
total_loss = 0
""" EVALUATE """
tcn.eval()
with torch.no_grad():
for data in test_loader:
x, y = data[0].to(device), data[1].to(device)
output = tcn(x)
test_loss = criterion(output, y)
if args.print:
print(f'\nTest set: Average loss: {test_loss.item():.6f}\n'
)
writer.add_scalar('test_loss', test_loss.item(), ep)
writer.close()
torch.save(tcn.state_dict(), args.model_save_path)
print('Finished Training')
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse():
parser = argparse.ArgumentParser(description='Adding Problem')
parser.add_argument('--N_train', type=int, default=50000, metavar='N_train'
)
parser.add_argument('--N_test', type=int, default=1000, metavar='N_test')
parser.add_argument('--seq_length', type=int, default=200, metavar=
'seq_length')
parser.add_argument('--batch_size', type=int, default=32, metavar=
'batch_size')
parser.add_argument('--num_layers', type=int, default=8, metavar=
'num_layers')
parser.add_argument('--in_channels', type=int, default=2, metavar=
'in_channels')
parser.add_argument('--out_channels', type=int, default=1, metavar=
'out_channels')
parser.add_argument('--kernel_size', type=int, default=7, metavar=
'kernel_size')
parser.add_argument('--res_block_size', type=int, default=30, metavar=
'res_block_size')
parser.add_argument('--bias', type=bool, default=True, metavar='bias')
parser.add_argument('--dropout', type=float, default=0.0, metavar='dropout'
)
parser.add_argument('--stride', type=int, default=1, metavar='stride')
parser.add_argument('--leveledinit', type=bool, default=False, metavar=
'leveledinit')
parser.add_argument('--model_save_path', type=str, default=
'adding_problem/models/tcn_addtwo.pt', metavar='model_save_path')
parser.add_argument('--epochs', type=int, default=10, metavar='epochs')
parser.add_argument('--lr', type=float, default=0.002, metavar='lr')
parser.add_argument('--clip', type=bool, default=False, metavar='clip')
parser.add_argument('--log_interval', type=int, default=100, metavar=
'log_interval')
parser.add_argument('--writer_path', type=str, default=
'adding_problem/sruns/add_two1', metavar='writer_path')
parser.add_argument('--print', type=bool, default=False, metavar='print')
parser.add_argument('--num_workers', type=int, default=0, metavar=
'num_workers')
args = parser.parse_args()
return args
def run():
torch.manual_seed(1729)
""" Setup """
args = parse()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
""" Dataset """
train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)
test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)
train_loader = DataLoader(dataset=train_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
test_loader = DataLoader(dataset=test_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
""" TCN """
tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,
out_channels=args.out_channels, kernel_size=args.kernel_size,
residual_blocks_channel_size=[args.res_block_size] * args.
num_layers, bias=args.bias, dropout=args.dropout, stride=args.
stride, dilations=None, leveledinit=args.leveledinit)
tcn.to(device)
if args.print:
print(
f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'
)
""" Training parameters"""
criterion = nn.MSELoss()
optimizer = optim.Adam(tcn.parameters(), lr=args.lr)
""" Tensorboard """
writer = SummaryWriter(args.writer_path)
for ep in range(1, args.epochs + 1):
""" TRAIN """
tcn.train()
total_loss = 0
for i, data in enumerate(train_loader):
x, y = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
output = tcn(x)
loss = F.mse_loss(output, y)
loss.backward()
if args.clip > 0:
                torch.nn.utils.clip_grad_norm_(tcn.parameters(), args.clip)
optimizer.step()
total_loss += loss.item()
if i % args.log_interval == 0:
cur_loss = total_loss / args.log_interval
processed = min(i * args.batch_size, args.N_train)
writer.add_scalar('training_loss', cur_loss, processed)
if args.print:
print(
f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\tLearning rate: {args.lr:.4f}\tLoss: {cur_loss:.6f}'
)
total_loss = 0
""" EVALUATE """
tcn.eval()
with torch.no_grad():
for data in test_loader:
x, y = data[0].to(device), data[1].to(device)
output = tcn(x)
test_loss = criterion(output, y)
if args.print:
print(f'\nTest set: Average loss: {test_loss.item():.6f}\n'
)
writer.add_scalar('test_loss', test_loss.item(), ep)
writer.close()
torch.save(tcn.state_dict(), args.model_save_path)
print('Finished Training')
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Importing modules')
<|reserved_special_token_0|>
sys.path.append('')
sys.path.append('../../')
<|reserved_special_token_0|>
print('modules imported')
def parse():
parser = argparse.ArgumentParser(description='Adding Problem')
parser.add_argument('--N_train', type=int, default=50000, metavar='N_train'
)
parser.add_argument('--N_test', type=int, default=1000, metavar='N_test')
parser.add_argument('--seq_length', type=int, default=200, metavar=
'seq_length')
parser.add_argument('--batch_size', type=int, default=32, metavar=
'batch_size')
parser.add_argument('--num_layers', type=int, default=8, metavar=
'num_layers')
parser.add_argument('--in_channels', type=int, default=2, metavar=
'in_channels')
parser.add_argument('--out_channels', type=int, default=1, metavar=
'out_channels')
parser.add_argument('--kernel_size', type=int, default=7, metavar=
'kernel_size')
parser.add_argument('--res_block_size', type=int, default=30, metavar=
'res_block_size')
parser.add_argument('--bias', type=bool, default=True, metavar='bias')
parser.add_argument('--dropout', type=float, default=0.0, metavar='dropout'
)
parser.add_argument('--stride', type=int, default=1, metavar='stride')
parser.add_argument('--leveledinit', type=bool, default=False, metavar=
'leveledinit')
parser.add_argument('--model_save_path', type=str, default=
'adding_problem/models/tcn_addtwo.pt', metavar='model_save_path')
parser.add_argument('--epochs', type=int, default=10, metavar='epochs')
parser.add_argument('--lr', type=float, default=0.002, metavar='lr')
parser.add_argument('--clip', type=bool, default=False, metavar='clip')
parser.add_argument('--log_interval', type=int, default=100, metavar=
'log_interval')
parser.add_argument('--writer_path', type=str, default=
'adding_problem/sruns/add_two1', metavar='writer_path')
parser.add_argument('--print', type=bool, default=False, metavar='print')
parser.add_argument('--num_workers', type=int, default=0, metavar=
'num_workers')
args = parser.parse_args()
return args
def run():
torch.manual_seed(1729)
""" Setup """
args = parse()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
""" Dataset """
train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)
test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)
train_loader = DataLoader(dataset=train_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
test_loader = DataLoader(dataset=test_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
""" TCN """
tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,
out_channels=args.out_channels, kernel_size=args.kernel_size,
residual_blocks_channel_size=[args.res_block_size] * args.
num_layers, bias=args.bias, dropout=args.dropout, stride=args.
stride, dilations=None, leveledinit=args.leveledinit)
tcn.to(device)
if args.print:
print(
f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'
)
""" Training parameters"""
criterion = nn.MSELoss()
optimizer = optim.Adam(tcn.parameters(), lr=args.lr)
""" Tensorboard """
writer = SummaryWriter(args.writer_path)
for ep in range(1, args.epochs + 1):
""" TRAIN """
tcn.train()
total_loss = 0
for i, data in enumerate(train_loader):
x, y = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
output = tcn(x)
loss = F.mse_loss(output, y)
loss.backward()
if args.clip > 0:
                torch.nn.utils.clip_grad_norm_(tcn.parameters(), args.clip)
optimizer.step()
total_loss += loss.item()
if i % args.log_interval == 0:
cur_loss = total_loss / args.log_interval
processed = min(i * args.batch_size, args.N_train)
writer.add_scalar('training_loss', cur_loss, processed)
if args.print:
print(
f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\tLearning rate: {args.lr:.4f}\tLoss: {cur_loss:.6f}'
)
total_loss = 0
""" EVALUATE """
tcn.eval()
with torch.no_grad():
for data in test_loader:
x, y = data[0].to(device), data[1].to(device)
output = tcn(x)
test_loss = criterion(output, y)
if args.print:
print(f'\nTest set: Average loss: {test_loss.item():.6f}\n'
)
writer.add_scalar('test_loss', test_loss.item(), ep)
writer.close()
torch.save(tcn.state_dict(), args.model_save_path)
print('Finished Training')
return 0
if __name__ == '__main__':
run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Importing modules')
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import argparse
import sys
sys.path.append('')
sys.path.append('../../')
from data import AddTwoDataSet
from model import TCN
print('modules imported')
def parse():
parser = argparse.ArgumentParser(description='Adding Problem')
parser.add_argument('--N_train', type=int, default=50000, metavar='N_train'
)
parser.add_argument('--N_test', type=int, default=1000, metavar='N_test')
parser.add_argument('--seq_length', type=int, default=200, metavar=
'seq_length')
parser.add_argument('--batch_size', type=int, default=32, metavar=
'batch_size')
parser.add_argument('--num_layers', type=int, default=8, metavar=
'num_layers')
parser.add_argument('--in_channels', type=int, default=2, metavar=
'in_channels')
parser.add_argument('--out_channels', type=int, default=1, metavar=
'out_channels')
parser.add_argument('--kernel_size', type=int, default=7, metavar=
'kernel_size')
parser.add_argument('--res_block_size', type=int, default=30, metavar=
'res_block_size')
parser.add_argument('--bias', type=bool, default=True, metavar='bias')
parser.add_argument('--dropout', type=float, default=0.0, metavar='dropout'
)
parser.add_argument('--stride', type=int, default=1, metavar='stride')
parser.add_argument('--leveledinit', type=bool, default=False, metavar=
'leveledinit')
parser.add_argument('--model_save_path', type=str, default=
'adding_problem/models/tcn_addtwo.pt', metavar='model_save_path')
parser.add_argument('--epochs', type=int, default=10, metavar='epochs')
parser.add_argument('--lr', type=float, default=0.002, metavar='lr')
parser.add_argument('--clip', type=bool, default=False, metavar='clip')
parser.add_argument('--log_interval', type=int, default=100, metavar=
'log_interval')
parser.add_argument('--writer_path', type=str, default=
'adding_problem/sruns/add_two1', metavar='writer_path')
parser.add_argument('--print', type=bool, default=False, metavar='print')
parser.add_argument('--num_workers', type=int, default=0, metavar=
'num_workers')
args = parser.parse_args()
return args
def run():
torch.manual_seed(1729)
""" Setup """
args = parse()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
""" Dataset """
train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)
test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)
train_loader = DataLoader(dataset=train_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
test_loader = DataLoader(dataset=test_dataset, batch_size=args.
batch_size, shuffle=True, num_workers=args.num_workers)
""" TCN """
tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,
out_channels=args.out_channels, kernel_size=args.kernel_size,
residual_blocks_channel_size=[args.res_block_size] * args.
num_layers, bias=args.bias, dropout=args.dropout, stride=args.
stride, dilations=None, leveledinit=args.leveledinit)
tcn.to(device)
if args.print:
print(
f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'
)
""" Training parameters"""
criterion = nn.MSELoss()
optimizer = optim.Adam(tcn.parameters(), lr=args.lr)
""" Tensorboard """
writer = SummaryWriter(args.writer_path)
for ep in range(1, args.epochs + 1):
""" TRAIN """
tcn.train()
total_loss = 0
for i, data in enumerate(train_loader):
x, y = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
output = tcn(x)
loss = F.mse_loss(output, y)
loss.backward()
if args.clip > 0:
                torch.nn.utils.clip_grad_norm_(tcn.parameters(), args.clip)
optimizer.step()
total_loss += loss.item()
if i % args.log_interval == 0:
cur_loss = total_loss / args.log_interval
processed = min(i * args.batch_size, args.N_train)
writer.add_scalar('training_loss', cur_loss, processed)
if args.print:
print(
f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\tLearning rate: {args.lr:.4f}\tLoss: {cur_loss:.6f}'
)
total_loss = 0
""" EVALUATE """
tcn.eval()
with torch.no_grad():
for data in test_loader:
x, y = data[0].to(device), data[1].to(device)
output = tcn(x)
test_loss = criterion(output, y)
if args.print:
print(f'\nTest set: Average loss: {test_loss.item():.6f}\n'
)
writer.add_scalar('test_loss', test_loss.item(), ep)
writer.close()
torch.save(tcn.state_dict(), args.model_save_path)
print('Finished Training')
return 0
if __name__ == '__main__':
run()
<|reserved_special_token_1|>
# addtwo_run.py
"""
Train and test a TCN on the add two dataset.
Trying to reproduce https://arxiv.org/abs/1803.01271.
"""
print('Importing modules')
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import argparse
import sys
sys.path.append('')
sys.path.append("../../")
from data import AddTwoDataSet
from model import TCN
print('modules imported')
def parse():
parser = argparse.ArgumentParser(description='Adding Problem')
parser.add_argument(
'--N_train', type=int, default=50000, metavar='N_train')
parser.add_argument(
'--N_test', type=int, default=1000, metavar='N_test')
parser.add_argument(
'--seq_length', type=int, default=200, metavar='seq_length')
parser.add_argument(
'--batch_size', type=int, default=32, metavar='batch_size')
parser.add_argument(
'--num_layers', type=int, default=8, metavar='num_layers')
parser.add_argument(
'--in_channels', type=int, default=2, metavar='in_channels')
parser.add_argument(
'--out_channels', type=int, default=1, metavar='out_channels')
parser.add_argument(
'--kernel_size', type=int, default=7, metavar='kernel_size')
parser.add_argument(
'--res_block_size', type=int, default=30, metavar='res_block_size')
parser.add_argument(
'--bias', type=bool, default=True, metavar='bias')
parser.add_argument(
'--dropout', type=float, default=0.0, metavar='dropout')
parser.add_argument(
'--stride', type=int, default=1, metavar='stride')
parser.add_argument(
'--leveledinit', type=bool, default=False, metavar='leveledinit')
parser.add_argument(
'--model_save_path', type=str, default='adding_problem/models/tcn_addtwo.pt',
metavar='model_save_path')
parser.add_argument(
'--epochs', type=int, default=10, metavar='epochs')
parser.add_argument(
'--lr', type=float, default=2e-3, metavar='lr')
parser.add_argument(
'--clip', type=bool, default=False, metavar='clip')
parser.add_argument(
'--log_interval', type=int, default=100, metavar='log_interval')
parser.add_argument(
'--writer_path', type=str, default='adding_problem/sruns/add_two1',
metavar='writer_path')
parser.add_argument(
'--print', type=bool, default=False, metavar='print')
parser.add_argument(
'--num_workers', type=int, default=0, metavar='num_workers')
args = parser.parse_args()
return args
def run():
torch.manual_seed(1729)
""" Setup """
args = parse()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
""" Dataset """
train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)
test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)
train_loader = DataLoader(
dataset=train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
test_loader = DataLoader(
dataset=test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
""" TCN """
tcn = TCN(
num_layers=args.num_layers,
in_channels=args.in_channels,
out_channels=args.out_channels,
kernel_size=args.kernel_size,
residual_blocks_channel_size=[args.res_block_size] * args.num_layers,
bias=args.bias,
dropout=args.dropout,
stride=args.stride,
dilations=None,
leveledinit=args.leveledinit)
tcn.to(device)
if args.print:
print(
f"""Number of learnable parameters : {
sum(p.numel() for p in tcn.parameters() if p.requires_grad)}""")
""" Training parameters"""
criterion = nn.MSELoss()
optimizer = optim.Adam(tcn.parameters(), lr=args.lr)
""" Tensorboard """
writer = SummaryWriter(args.writer_path)
for ep in range(1, args.epochs+1):
""" TRAIN """
tcn.train()
total_loss = 0
for i, data in enumerate(train_loader):
x, y = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
output = tcn(x)
loss = F.mse_loss(output, y)
loss.backward()
if args.clip > 0:
                torch.nn.utils.clip_grad_norm_(tcn.parameters(), args.clip)
optimizer.step()
total_loss += loss.item()
if i % args.log_interval == 0:
cur_loss = total_loss / args.log_interval
processed = min(i*args.batch_size, args.N_train)
writer.add_scalar('training_loss', cur_loss, processed)
if args.print:
print(
(f"Train Epoch: {ep:2d}"
f"[{processed:6d}/{args.N_train:6d}"
f"({100.*processed/args.N_train:.0f}%)]"
f"\tLearning rate: {args.lr:.4f}\tLoss: {cur_loss:.6f}"))
total_loss = 0
""" EVALUATE """
tcn.eval()
with torch.no_grad():
for data in test_loader:
x, y = data[0].to(device), data[1].to(device)
output = tcn(x)
test_loss = criterion(output, y)
if args.print:
print(
f'\nTest set: Average loss: {test_loss.item():.6f}\n')
        writer.add_scalar('test_loss', test_loss.item(), ep)
writer.close()
torch.save(tcn.state_dict(), args.model_save_path)
print('Finished Training')
return 0
if __name__ == "__main__":
run()
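# Example invocation (a sketch; the flag names match the argparse definitions
# above, the script filename is assumed from the header comment):
#   python addtwo_run.py --epochs 10 --lr 2e-3 --batch_size 32
# Note: argparse's type=bool treats any non-empty string as True, so passing
# --print False would still enable printing.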
|
flexible
|
{
"blob_id": "fe1a9804862942491b11b9baceecd37bf628fbb8",
"index": 8732,
"step-1": "<mask token>\n\n\ndef run():\n torch.manual_seed(1729)\n \"\"\" Setup \"\"\"\n args = parse()\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n \"\"\" Dataset \"\"\"\n train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)\n test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)\n train_loader = DataLoader(dataset=train_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n test_loader = DataLoader(dataset=test_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n \"\"\" TCN \"\"\"\n tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,\n out_channels=args.out_channels, kernel_size=args.kernel_size,\n residual_blocks_channel_size=[args.res_block_size] * args.\n num_layers, bias=args.bias, dropout=args.dropout, stride=args.\n stride, dilations=None, leveledinit=args.leveledinit)\n tcn.to(device)\n if args.print:\n print(\n f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'\n )\n \"\"\" Training parameters\"\"\"\n criterion = nn.MSELoss()\n optimizer = optim.Adam(tcn.parameters(), lr=args.lr)\n \"\"\" Tensorboard \"\"\"\n writer = SummaryWriter(args.writer_path)\n for ep in range(1, args.epochs + 1):\n \"\"\" TRAIN \"\"\"\n tcn.train()\n total_loss = 0\n for i, data in enumerate(train_loader):\n x, y = data[0].to(device), data[1].to(device)\n optimizer.zero_grad()\n output = tcn(x)\n loss = F.mse_loss(output, y)\n loss.backward()\n if args.clip > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n optimizer.step()\n total_loss += loss.item()\n if i % args.log_interval == 0:\n cur_loss = total_loss / args.log_interval\n processed = min(i * args.batch_size, args.N_train)\n writer.add_scalar('training_loss', cur_loss, processed)\n if args.print:\n print(\n f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\\tLearning rate: {args.lr:.4f}\\tLoss: {cur_loss:.6f}'\n )\n total_loss = 0\n \"\"\" EVALUATE \"\"\"\n tcn.eval()\n with torch.no_grad():\n for data in test_loader:\n x, y = data[0].to(device), data[1].to(device)\n output = tcn(x)\n test_loss = criterion(output, y)\n if args.print:\n print(f'\\nTest set: Average loss: {test_loss.item():.6f}\\n'\n )\n writer.add_scalar('test_loss', test_loss.item(), ep)\n writer.close()\n torch.save(tcn.state_dict(), args.model_save_path)\n print('Finished Training')\n return 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse():\n parser = argparse.ArgumentParser(description='Adding Problem')\n parser.add_argument('--N_train', type=int, default=50000, metavar='N_train'\n )\n parser.add_argument('--N_test', type=int, default=1000, metavar='N_test')\n parser.add_argument('--seq_length', type=int, default=200, metavar=\n 'seq_length')\n parser.add_argument('--batch_size', type=int, default=32, metavar=\n 'batch_size')\n parser.add_argument('--num_layers', type=int, default=8, metavar=\n 'num_layers')\n parser.add_argument('--in_channels', type=int, default=2, metavar=\n 'in_channels')\n parser.add_argument('--out_channels', type=int, default=1, metavar=\n 'out_channels')\n parser.add_argument('--kernel_size', type=int, default=7, metavar=\n 'kernel_size')\n parser.add_argument('--res_block_size', type=int, default=30, metavar=\n 'res_block_size')\n parser.add_argument('--bias', type=bool, default=True, metavar='bias')\n parser.add_argument('--dropout', type=float, default=0.0, metavar='dropout'\n )\n parser.add_argument('--stride', type=int, default=1, metavar='stride')\n parser.add_argument('--leveledinit', type=bool, default=False, metavar=\n 'leveledinit')\n parser.add_argument('--model_save_path', type=str, default=\n 'adding_problem/models/tcn_addtwo.pt', metavar='model_save_path')\n parser.add_argument('--epochs', type=int, default=10, metavar='epochs')\n parser.add_argument('--lr', type=float, default=0.002, metavar='lr')\n parser.add_argument('--clip', type=bool, default=False, metavar='clip')\n parser.add_argument('--log_interval', type=int, default=100, metavar=\n 'log_interval')\n parser.add_argument('--writer_path', type=str, default=\n 'adding_problem/sruns/add_two1', metavar='writer_path')\n parser.add_argument('--print', type=bool, default=False, metavar='print')\n parser.add_argument('--num_workers', type=int, default=0, metavar=\n 'num_workers')\n args = parser.parse_args()\n return args\n\n\ndef run():\n torch.manual_seed(1729)\n \"\"\" Setup \"\"\"\n args = parse()\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n \"\"\" Dataset \"\"\"\n train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)\n test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)\n train_loader = DataLoader(dataset=train_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n test_loader = DataLoader(dataset=test_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n \"\"\" TCN \"\"\"\n tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,\n out_channels=args.out_channels, kernel_size=args.kernel_size,\n residual_blocks_channel_size=[args.res_block_size] * args.\n num_layers, bias=args.bias, dropout=args.dropout, stride=args.\n stride, dilations=None, leveledinit=args.leveledinit)\n tcn.to(device)\n if args.print:\n print(\n f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'\n )\n \"\"\" Training parameters\"\"\"\n criterion = nn.MSELoss()\n optimizer = optim.Adam(tcn.parameters(), lr=args.lr)\n \"\"\" Tensorboard \"\"\"\n writer = SummaryWriter(args.writer_path)\n for ep in range(1, args.epochs + 1):\n \"\"\" TRAIN \"\"\"\n tcn.train()\n total_loss = 0\n for i, data in enumerate(train_loader):\n x, y = data[0].to(device), data[1].to(device)\n optimizer.zero_grad()\n output = tcn(x)\n loss = F.mse_loss(output, y)\n loss.backward()\n if args.clip > 0:\n 
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n optimizer.step()\n total_loss += loss.item()\n if i % args.log_interval == 0:\n cur_loss = total_loss / args.log_interval\n processed = min(i * args.batch_size, args.N_train)\n writer.add_scalar('training_loss', cur_loss, processed)\n if args.print:\n print(\n f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\\tLearning rate: {args.lr:.4f}\\tLoss: {cur_loss:.6f}'\n )\n total_loss = 0\n \"\"\" EVALUATE \"\"\"\n tcn.eval()\n with torch.no_grad():\n for data in test_loader:\n x, y = data[0].to(device), data[1].to(device)\n output = tcn(x)\n test_loss = criterion(output, y)\n if args.print:\n print(f'\\nTest set: Average loss: {test_loss.item():.6f}\\n'\n )\n writer.add_scalar('test_loss', test_loss.item(), ep)\n writer.close()\n torch.save(tcn.state_dict(), args.model_save_path)\n print('Finished Training')\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint('Importing modules')\n<mask token>\nsys.path.append('')\nsys.path.append('../../')\n<mask token>\nprint('modules imported')\n\n\ndef parse():\n parser = argparse.ArgumentParser(description='Adding Problem')\n parser.add_argument('--N_train', type=int, default=50000, metavar='N_train'\n )\n parser.add_argument('--N_test', type=int, default=1000, metavar='N_test')\n parser.add_argument('--seq_length', type=int, default=200, metavar=\n 'seq_length')\n parser.add_argument('--batch_size', type=int, default=32, metavar=\n 'batch_size')\n parser.add_argument('--num_layers', type=int, default=8, metavar=\n 'num_layers')\n parser.add_argument('--in_channels', type=int, default=2, metavar=\n 'in_channels')\n parser.add_argument('--out_channels', type=int, default=1, metavar=\n 'out_channels')\n parser.add_argument('--kernel_size', type=int, default=7, metavar=\n 'kernel_size')\n parser.add_argument('--res_block_size', type=int, default=30, metavar=\n 'res_block_size')\n parser.add_argument('--bias', type=bool, default=True, metavar='bias')\n parser.add_argument('--dropout', type=float, default=0.0, metavar='dropout'\n )\n parser.add_argument('--stride', type=int, default=1, metavar='stride')\n parser.add_argument('--leveledinit', type=bool, default=False, metavar=\n 'leveledinit')\n parser.add_argument('--model_save_path', type=str, default=\n 'adding_problem/models/tcn_addtwo.pt', metavar='model_save_path')\n parser.add_argument('--epochs', type=int, default=10, metavar='epochs')\n parser.add_argument('--lr', type=float, default=0.002, metavar='lr')\n parser.add_argument('--clip', type=bool, default=False, metavar='clip')\n parser.add_argument('--log_interval', type=int, default=100, metavar=\n 'log_interval')\n parser.add_argument('--writer_path', type=str, default=\n 'adding_problem/sruns/add_two1', metavar='writer_path')\n parser.add_argument('--print', type=bool, default=False, metavar='print')\n parser.add_argument('--num_workers', type=int, default=0, metavar=\n 'num_workers')\n args = parser.parse_args()\n return args\n\n\ndef run():\n torch.manual_seed(1729)\n \"\"\" Setup \"\"\"\n args = parse()\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n \"\"\" Dataset \"\"\"\n train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)\n test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)\n train_loader = DataLoader(dataset=train_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n test_loader = DataLoader(dataset=test_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n \"\"\" TCN \"\"\"\n tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,\n out_channels=args.out_channels, kernel_size=args.kernel_size,\n residual_blocks_channel_size=[args.res_block_size] * args.\n num_layers, bias=args.bias, dropout=args.dropout, stride=args.\n stride, dilations=None, leveledinit=args.leveledinit)\n tcn.to(device)\n if args.print:\n print(\n f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'\n )\n \"\"\" Training parameters\"\"\"\n criterion = nn.MSELoss()\n optimizer = optim.Adam(tcn.parameters(), lr=args.lr)\n \"\"\" Tensorboard \"\"\"\n writer = SummaryWriter(args.writer_path)\n for ep in range(1, args.epochs + 1):\n \"\"\" TRAIN \"\"\"\n tcn.train()\n total_loss = 0\n for i, data in enumerate(train_loader):\n x, y = data[0].to(device), data[1].to(device)\n 
optimizer.zero_grad()\n output = tcn(x)\n loss = F.mse_loss(output, y)\n loss.backward()\n if args.clip > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n optimizer.step()\n total_loss += loss.item()\n if i % args.log_interval == 0:\n cur_loss = total_loss / args.log_interval\n processed = min(i * args.batch_size, args.N_train)\n writer.add_scalar('training_loss', cur_loss, processed)\n if args.print:\n print(\n f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\\tLearning rate: {args.lr:.4f}\\tLoss: {cur_loss:.6f}'\n )\n total_loss = 0\n \"\"\" EVALUATE \"\"\"\n tcn.eval()\n with torch.no_grad():\n for data in test_loader:\n x, y = data[0].to(device), data[1].to(device)\n output = tcn(x)\n test_loss = criterion(output, y)\n if args.print:\n print(f'\\nTest set: Average loss: {test_loss.item():.6f}\\n'\n )\n writer.add_scalar('test_loss', test_loss.item(), ep)\n writer.close()\n torch.save(tcn.state_dict(), args.model_save_path)\n print('Finished Training')\n return 0\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "<mask token>\nprint('Importing modules')\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\nimport argparse\nimport sys\nsys.path.append('')\nsys.path.append('../../')\nfrom data import AddTwoDataSet\nfrom model import TCN\nprint('modules imported')\n\n\ndef parse():\n parser = argparse.ArgumentParser(description='Adding Problem')\n parser.add_argument('--N_train', type=int, default=50000, metavar='N_train'\n )\n parser.add_argument('--N_test', type=int, default=1000, metavar='N_test')\n parser.add_argument('--seq_length', type=int, default=200, metavar=\n 'seq_length')\n parser.add_argument('--batch_size', type=int, default=32, metavar=\n 'batch_size')\n parser.add_argument('--num_layers', type=int, default=8, metavar=\n 'num_layers')\n parser.add_argument('--in_channels', type=int, default=2, metavar=\n 'in_channels')\n parser.add_argument('--out_channels', type=int, default=1, metavar=\n 'out_channels')\n parser.add_argument('--kernel_size', type=int, default=7, metavar=\n 'kernel_size')\n parser.add_argument('--res_block_size', type=int, default=30, metavar=\n 'res_block_size')\n parser.add_argument('--bias', type=bool, default=True, metavar='bias')\n parser.add_argument('--dropout', type=float, default=0.0, metavar='dropout'\n )\n parser.add_argument('--stride', type=int, default=1, metavar='stride')\n parser.add_argument('--leveledinit', type=bool, default=False, metavar=\n 'leveledinit')\n parser.add_argument('--model_save_path', type=str, default=\n 'adding_problem/models/tcn_addtwo.pt', metavar='model_save_path')\n parser.add_argument('--epochs', type=int, default=10, metavar='epochs')\n parser.add_argument('--lr', type=float, default=0.002, metavar='lr')\n parser.add_argument('--clip', type=bool, default=False, metavar='clip')\n parser.add_argument('--log_interval', type=int, default=100, metavar=\n 'log_interval')\n parser.add_argument('--writer_path', type=str, default=\n 'adding_problem/sruns/add_two1', metavar='writer_path')\n parser.add_argument('--print', type=bool, default=False, metavar='print')\n parser.add_argument('--num_workers', type=int, default=0, metavar=\n 'num_workers')\n args = parser.parse_args()\n return args\n\n\ndef run():\n torch.manual_seed(1729)\n \"\"\" Setup \"\"\"\n args = parse()\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n \"\"\" Dataset \"\"\"\n train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)\n test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)\n train_loader = DataLoader(dataset=train_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n test_loader = DataLoader(dataset=test_dataset, batch_size=args.\n batch_size, shuffle=True, num_workers=args.num_workers)\n \"\"\" TCN \"\"\"\n tcn = TCN(num_layers=args.num_layers, in_channels=args.in_channels,\n out_channels=args.out_channels, kernel_size=args.kernel_size,\n residual_blocks_channel_size=[args.res_block_size] * args.\n num_layers, bias=args.bias, dropout=args.dropout, stride=args.\n stride, dilations=None, leveledinit=args.leveledinit)\n tcn.to(device)\n if args.print:\n print(\n f'Number of learnable parameters : {sum(p.numel() for p in tcn.parameters() if p.requires_grad)}'\n )\n \"\"\" Training parameters\"\"\"\n criterion = nn.MSELoss()\n optimizer = optim.Adam(tcn.parameters(), lr=args.lr)\n \"\"\" Tensorboard \"\"\"\n 
writer = SummaryWriter(args.writer_path)\n for ep in range(1, args.epochs + 1):\n \"\"\" TRAIN \"\"\"\n tcn.train()\n total_loss = 0\n for i, data in enumerate(train_loader):\n x, y = data[0].to(device), data[1].to(device)\n optimizer.zero_grad()\n output = tcn(x)\n loss = F.mse_loss(output, y)\n loss.backward()\n if args.clip > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n optimizer.step()\n total_loss += loss.item()\n if i % args.log_interval == 0:\n cur_loss = total_loss / args.log_interval\n processed = min(i * args.batch_size, args.N_train)\n writer.add_scalar('training_loss', cur_loss, processed)\n if args.print:\n print(\n f'Train Epoch: {ep:2d}[{processed:6d}/{args.N_train:6d}({100.0 * processed / args.N_train:.0f}%)]\\tLearning rate: {args.lr:.4f}\\tLoss: {cur_loss:.6f}'\n )\n total_loss = 0\n \"\"\" EVALUATE \"\"\"\n tcn.eval()\n with torch.no_grad():\n for data in test_loader:\n x, y = data[0].to(device), data[1].to(device)\n output = tcn(x)\n test_loss = criterion(output, y)\n if args.print:\n print(f'\\nTest set: Average loss: {test_loss.item():.6f}\\n'\n )\n writer.add_scalar('test_loss', test_loss.item(), ep)\n writer.close()\n torch.save(tcn.state_dict(), args.model_save_path)\n print('Finished Training')\n return 0\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "# addtwo_run-py\r\n\"\"\"\r\nTrain and test a TCN on the add two dataset.\r\nTrying to reproduce https://arxiv.org/abs/1803.01271.\r\n\"\"\"\r\nprint('Importing modules')\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom torch.utils.data import DataLoader\r\n\r\nimport argparse\r\nimport sys\r\nsys.path.append('')\r\nsys.path.append(\"../../\")\r\n\r\nfrom data import AddTwoDataSet\r\nfrom model import TCN\r\nprint('modules imported')\r\n\r\ndef parse():\r\n parser = argparse.ArgumentParser(description='Adding Problem')\r\n parser.add_argument(\r\n '--N_train', type=int, default=50000, metavar='N_train')\r\n parser.add_argument(\r\n '--N_test', type=int, default=1000, metavar='N_test')\r\n parser.add_argument(\r\n '--seq_length', type=int, default=200, metavar='seq_length')\r\n parser.add_argument(\r\n '--batch_size', type=int, default=32, metavar='batch_size')\r\n parser.add_argument(\r\n '--num_layers', type=int, default=8, metavar='num_layers')\r\n parser.add_argument(\r\n '--in_channels', type=int, default=2, metavar='in_channels')\r\n parser.add_argument(\r\n '--out_channels', type=int, default=1, metavar='out_channels')\r\n parser.add_argument(\r\n '--kernel_size', type=int, default=7, metavar='kernel_size')\r\n parser.add_argument(\r\n '--res_block_size', type=int, default=30, metavar='res_block_size')\r\n parser.add_argument(\r\n '--bias', type=bool, default=True, metavar='bias')\r\n parser.add_argument(\r\n '--dropout', type=float, default=0.0, metavar='dropout')\r\n parser.add_argument(\r\n '--stride', type=int, default=1, metavar='stride')\r\n parser.add_argument(\r\n '--leveledinit', type=bool, default=False, metavar='leveledinit')\r\n parser.add_argument(\r\n '--model_save_path', type=str, default='adding_problem/models/tcn_addtwo.pt', \r\n metavar='model_save_path')\r\n parser.add_argument(\r\n '--epochs', type=int, default=10, metavar='epochs')\r\n parser.add_argument(\r\n '--lr', type=float, default=2e-3, metavar='lr')\r\n parser.add_argument(\r\n '--clip', type=bool, default=False, metavar='clip')\r\n parser.add_argument(\r\n '--log_interval', type=int, default=100, metavar='log_interval')\r\n parser.add_argument(\r\n '--writer_path', type=str, default='adding_problem/sruns/add_two1', \r\n metavar='writer_path')\r\n parser.add_argument(\r\n '--print', type=bool, default=False, metavar='print')\r\n parser.add_argument(\r\n '--num_workers', type=int, default=0, metavar='num_workers')\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef run():\r\n torch.manual_seed(1729)\r\n \r\n \"\"\" Setup \"\"\"\r\n args = parse()\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n print(device)\r\n\r\n \"\"\" Dataset \"\"\"\r\n train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)\r\n test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)\r\n train_loader = DataLoader(\r\n dataset=train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\r\n test_loader = DataLoader(\r\n dataset=test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\r\n\r\n \"\"\" TCN \"\"\"\r\n tcn = TCN(\r\n num_layers=args.num_layers,\r\n in_channels=args.in_channels,\r\n out_channels=args.out_channels,\r\n kernel_size=args.kernel_size,\r\n residual_blocks_channel_size=[args.res_block_size] * args.num_layers,\r\n bias=args.bias,\r\n dropout=args.dropout,\r\n 
stride=args.stride,\r\n dilations=None,\r\n leveledinit=args.leveledinit)\r\n tcn.to(device)\r\n if args.print:\r\n print(\r\n f\"\"\"Number of learnable parameters : {\r\n sum(p.numel() for p in tcn.parameters() if p.requires_grad)}\"\"\")\r\n\r\n \"\"\" Training parameters\"\"\"\r\n criterion = nn.MSELoss()\r\n optimizer = optim.Adam(tcn.parameters(), lr=args.lr)\r\n\r\n \"\"\" Tensorboard \"\"\"\r\n writer = SummaryWriter(args.writer_path)\r\n\r\n for ep in range(1, args.epochs+1):\r\n \"\"\" TRAIN \"\"\"\r\n tcn.train()\r\n total_loss = 0\r\n for i, data in enumerate(train_loader):\r\n x, y = data[0].to(device), data[1].to(device)\r\n optimizer.zero_grad()\r\n output = tcn(x)\r\n loss = F.mse_loss(output, y)\r\n loss.backward()\r\n if args.clip > 0:\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\r\n optimizer.step()\r\n total_loss += loss.item()\r\n\r\n if i % args.log_interval == 0:\r\n cur_loss = total_loss / args.log_interval\r\n processed = min(i*args.batch_size, args.N_train)\r\n writer.add_scalar('training_loss', cur_loss, processed)\r\n if args.print:\r\n print(\r\n (f\"Train Epoch: {ep:2d}\"\r\n f\"[{processed:6d}/{args.N_train:6d}\"\r\n f\"({100.*processed/args.N_train:.0f}%)]\"\r\n f\"\\tLearning rate: {args.lr:.4f}\\tLoss: {cur_loss:.6f}\"))\r\n total_loss = 0\r\n \"\"\" EVALUATE \"\"\"\r\n tcn.eval()\r\n with torch.no_grad():\r\n for data in test_loader:\r\n x, y = data[0].to(device), data[1].to(device)\r\n output = tcn(x)\r\n test_loss = criterion(output, y)\r\n if args.print:\r\n print(\r\n f'\\nTest set: Average loss: {test_loss.item():.6f}\\n')\r\n writer.add_scalar('test_loss', test_loss.item() , ep)\r\n\r\n writer.close()\r\n torch.save(tcn.state_dict(), args.model_save_path)\r\n print('Finished Training')\r\n return 0\r\n\r\nif __name__ == \"__main__\":\r\n run()\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def digits(x):
return set(int(d) for d in str(x))
<|reserved_special_token_0|>
def euler052():
multiples = range(2, 7)
for i in itertools.count(10 ** 5):
if same_digits_as_multiples(i, multiples):
return i
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def digits(x):
return set(int(d) for d in str(x))
<|reserved_special_token_0|>
def same_digits_as_multiples(x, multiples):
d = digits(x)
if len(d) != len(str(x)):
return False
for i in multiples:
if d != digits(i * x):
return False
return True
<|reserved_special_token_0|>
def euler052():
multiples = range(2, 7)
for i in itertools.count(10 ** 5):
if same_digits_as_multiples(i, multiples):
return i
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def digits(x):
return set(int(d) for d in str(x))
common.assertEquals(digits(125874), digits(251748))
def same_digits_as_multiples(x, multiples):
d = digits(x)
if len(d) != len(str(x)):
return False
for i in multiples:
if d != digits(i * x):
return False
return True
common.assertEquals(True, same_digits_as_multiples(125874, [2]))
common.assertEquals(False, same_digits_as_multiples(123456, [2]))
def euler052():
multiples = range(2, 7)
for i in itertools.count(10 ** 5):
if same_digits_as_multiples(i, multiples):
return i
common.submit(euler052(), expected=142857)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import common
import itertools
def digits(x):
return set(int(d) for d in str(x))
common.assertEquals(digits(125874), digits(251748))
def same_digits_as_multiples(x, multiples):
d = digits(x)
if len(d) != len(str(x)):
return False
for i in multiples:
if d != digits(i * x):
return False
return True
common.assertEquals(True, same_digits_as_multiples(125874, [2]))
common.assertEquals(False, same_digits_as_multiples(123456, [2]))
def euler052():
multiples = range(2, 7)
for i in itertools.count(10 ** 5):
if same_digits_as_multiples(i, multiples):
return i
common.submit(euler052(), expected=142857)
<|reserved_special_token_1|>
'''It can be seen that the number, 125874, and its double, 251748, contain
exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain
the same digits.
'''
import common
import itertools
def digits(x):
return set(int(d) for d in str(x))
common.assertEquals(digits(125874), digits(251748))
def same_digits_as_multiples(x, multiples):
d = digits(x)
# duplicate digits are implicitly forbidden
if len(d) != len(str(x)): return False
for i in multiples:
if d != digits(i*x):
return False
return True
common.assertEquals(True, same_digits_as_multiples(125874, [2]))
common.assertEquals(False, same_digits_as_multiples(123456, [2]))
def euler052():
multiples = range(2,7)
for i in itertools.count(10**5): # solution must have at least 6 digits
if same_digits_as_multiples(i, multiples):
return i
common.submit(euler052(), expected=142857)
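
# Worked check of the expected answer: 142857 is the repeating block of 1/7,
# and its multiples by 2..6 are 285714, 428571, 571428, 714285, 857142 --
# all permutations of the same six digits.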
|
flexible
|
{
"blob_id": "2ec8b9a92f8dd42faf99f0cd569ebf356e12c1d6",
"index": 8042,
"step-1": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\n<mask token>\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\n<mask token>\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\n<mask token>\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\ncommon.assertEquals(digits(125874), digits(251748))\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\ncommon.submit(euler052(), expected=142857)\n",
"step-4": "<mask token>\nimport common\nimport itertools\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\ncommon.assertEquals(digits(125874), digits(251748))\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\ncommon.submit(euler052(), expected=142857)\n",
"step-5": "'''It can be seen that the number, 125874, and its double, 251748, contain\nexactly the same digits, but in a different order.\n\nFind the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain\nthe same digits.\n'''\n\nimport common\nimport itertools\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\ncommon.assertEquals(digits(125874), digits(251748))\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n\n # duplicate digits are implicitly forbidden\n if len(d) != len(str(x)): return False\n\n for i in multiples:\n if d != digits(i*x):\n return False\n return True\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\ndef euler052():\n multiples = range(2,7)\n for i in itertools.count(10**5): # solution must have at least 6 digits\n if same_digits_as_multiples(i, multiples):\n return i\n\ncommon.submit(euler052(), expected=142857)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Solution:
def isUgly(self, num):
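        # Ugly numbers have no prime factors other than 2, 3 and 5;
        # divide those factors out and check whether only 1 remains.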
if num == 0:
return False
for n in [2, 3, 5]:
while num % n == 0:
                num = num // n  # integer division keeps num an int
return num == 1
a = Solution()
print(a.isUgly(14))
print(a.isUgly(8))
print(a.isUgly(6))
print(a.isUgly(0))
|
normal
|
{
"blob_id": "d39cc2dbbc83869e559f8355ceba5cf420adea5e",
"index": 1662,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def isUgly(self, num):\n if num == 0:\n return False\n for n in [2, 3, 5]:\n while num % n == 0:\n num = num / n\n return num == 1\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def isUgly(self, num):\n if num == 0:\n return False\n for n in [2, 3, 5]:\n while num % n == 0:\n num = num / n\n return num == 1\n\n\n<mask token>\nprint(a.isUgly(14))\nprint(a.isUgly(8))\nprint(a.isUgly(6))\nprint(a.isUgly(0))\n",
"step-4": "class Solution:\n\n def isUgly(self, num):\n if num == 0:\n return False\n for n in [2, 3, 5]:\n while num % n == 0:\n num = num / n\n return num == 1\n\n\na = Solution()\nprint(a.isUgly(14))\nprint(a.isUgly(8))\nprint(a.isUgly(6))\nprint(a.isUgly(0))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('show', t.root.show())
print('sum', t.root.sum())
print('find 3', t.root.find(3) != False)
print('evens', t.root.evens())
print('min depth', t.root.min_depth())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
t = createIntTree()
print('show', t.root.show())
print('sum', t.root.sum())
print('find 3', t.root.find(3) != False)
print('evens', t.root.evens())
print('min depth', t.root.min_depth())
<|reserved_special_token_1|>
from tree import Tree, createIntTree
t = createIntTree()
print('show', t.root.show())
print('sum', t.root.sum())
print('find 3', t.root.find(3) != False)
print('evens', t.root.evens())
print('min depth', t.root.min_depth())
|
flexible
|
{
"blob_id": "21a7fd5148f73ac47adafc9d5c2361ebe318ae59",
"index": 2842,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('show', t.root.show())\nprint('sum', t.root.sum())\nprint('find 3', t.root.find(3) != False)\nprint('evens', t.root.evens())\nprint('min depth', t.root.min_depth())\n",
"step-3": "<mask token>\nt = createIntTree()\nprint('show', t.root.show())\nprint('sum', t.root.sum())\nprint('find 3', t.root.find(3) != False)\nprint('evens', t.root.evens())\nprint('min depth', t.root.min_depth())\n",
"step-4": "from tree import Tree, createIntTree\nt = createIntTree()\nprint('show', t.root.show())\nprint('sum', t.root.sum())\nprint('find 3', t.root.find(3) != False)\nprint('evens', t.root.evens())\nprint('min depth', t.root.min_depth())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from datetime import *
dd = int(input("enter number day: "))  # target day number, 1=Sunday .. 7=Saturday
nn = int(datetime.now().strftime("%w")) + 1  # today's number on the same scale (%w gives 0=Sunday .. 6=Saturday)
# print the date of day number dd within the current (Sunday-first) week
print(datetime.now().date() + timedelta(days=dd - nn))
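# Example: on a Wednesday (nn = 4), entering 6 prints the date of that
# week's Friday, two days ahead.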
|
normal
|
{
"blob_id": "d3342507cb1966e14380ff28ae12b5c334abd20a",
"index": 5430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-3": "<mask token>\ndd = int(input('enter number day: '))\nnn = int(datetime.now().strftime('%w')) + 1\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-4": "from datetime import *\ndd = int(input('enter number day: '))\nnn = int(datetime.now().strftime('%w')) + 1\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-5": "from datetime import *\ndd=int(input(\"enter number day: \"))\nnn=int(datetime.now().strftime(\"%w\"))+1\n# print(dd)\n# print(nn)\nprint((datetime.now().date())+(timedelta(days=dd-nn)))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def greet(name):
hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]
return "Hello, {}! I'm {}#{}.".format(name, hostname, os.getppid())
<|reserved_special_token_1|>
import os, subprocess
def greet(name):
hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]
return "Hello, {}! I'm {}#{}.".format(name, hostname, os.getppid())
<|reserved_special_token_1|>
import os, subprocess
def greet(name):
hostname = subprocess.check_output("hostname").decode("utf-8")[:-1]
return "Hello, {}! I'm {}#{}.".format(name, hostname, os.getppid())
|
flexible
|
{
"blob_id": "9bd55a2f224acfa2cb34d0ca14a25e8864d644b3",
"index": 5250,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef greet(name):\n hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n",
"step-3": "import os, subprocess\n\n\ndef greet(name):\n hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n",
"step-4": "import os, subprocess\n\ndef greet(name):\n hostname = subprocess.check_output(\"hostname\").decode(\"utf-8\")[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense,Activation,Dropout
from keras.optimizers import SGD,Adam,RMSprop
from keras.utils import np_utils
x_train=np.loadtxt('Xtrain.txt',delimiter=' ')
x_test=np.loadtxt('Xtest.txt',delimiter=' ')
sigmoid_output=np.loadtxt('sigmoid_MSE.txt',delimiter=' ')
softplus_output=np.loadtxt('softplus_MSE.txt',delimiter=' ')
sigmoid_mean=np.mean(sigmoid_output,axis=0)
softplus_mean=np.mean(softplus_output,axis=0)
plt.plot(np.arange(1,11), sigmoid_mean,'r')
plt.plot(np.arange(1,11), softplus_mean,'g')
plt.title('Model Select')
plt.xlabel('Number of Perceptrons')
plt.ylabel('Mean Squared Error')
plt.legend(['Sigmoid', 'Softplus'])
plt.show()
if np.amin(sigmoid_mean)>np.amin(softplus_mean):
activation_function="softplus"
n_perceptron=np.argmin(softplus_mean)+1
else:
activation_function="sigmoid"
n_perceptron=np.argmin(sigmoid_mean)+1
print "the number of perceptron of best perform model: ",n_perceptron
print "the activation function of best perform model: ",activation_function
model = Sequential([Dense(n_perceptron,input_dim=1,activation=activation_function),
Dense(1,input_dim=n_perceptron,activation = None)
])
model.compile(optimizer='adam',loss='mean_squared_error')
# Begin training the best-performing model
converged=0
tolerance=0.001
last_score=0
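# Train in 20-epoch chunks until the test MSE changes by less than the
# tolerance between successive evaluations.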
while converged==0:
model.fit(x_train[:,0], x_train[:,1], epochs = 20, batch_size=64, verbose = 0)
score = model.evaluate(x_test[:,0], x_test[:,1])
print np.abs(score-last_score)
print score
if np.abs(score-last_score)<tolerance:
converged=1
last_score=score
print "MSE of test dataset with best model: ",last_score
y_predict=model.predict(x_test[:,0])
plt.subplot(121)
plt.plot(x_test[:,0], y_predict,'.r')
plt.title('Predict Distribution')
plt.xlabel('x1 value')
plt.ylabel('predict x2 value')
plt.subplot(122)
plt.plot(x_test[:,0], x_test[:,1],'.r')
plt.title('True Distribution')
plt.xlabel('x1 value')
plt.ylabel('x2 value')
plt.show()
|
normal
|
{
"blob_id": "5b919bde9f4fe1da867695ece58f151abb9b70fb",
"index": 1492,
"step-1": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense,Activation,Dropout\nfrom keras.optimizers import SGD,Adam,RMSprop\nfrom keras.utils import np_utils\n\n\nx_train=np.loadtxt('Xtrain.txt',delimiter=' ')\nx_test=np.loadtxt('Xtest.txt',delimiter=' ')\n\nsigmoid_output=np.loadtxt('sigmoid_MSE.txt',delimiter=' ')\nsoftplus_output=np.loadtxt('softplus_MSE.txt',delimiter=' ')\n\nsigmoid_mean=np.mean(sigmoid_output,axis=0)\nsoftplus_mean=np.mean(softplus_output,axis=0)\nplt.plot(np.arange(1,11), sigmoid_mean,'r')\nplt.plot(np.arange(1,11), softplus_mean,'g')\nplt.title('Model Select')\nplt.xlabel('Number of Perceptrons')\nplt.ylabel('Mean Squared Error')\nplt.legend(['Sigmoid', 'Softplus'])\nplt.show()\n\nif np.amin(sigmoid_mean)>np.amin(softplus_mean):\n\tactivation_function=\"softplus\"\n\tn_perceptron=np.argmin(softplus_mean)+1\nelse:\n\tactivation_function=\"sigmoid\"\n\tn_perceptron=np.argmin(sigmoid_mean)+1\n\nprint \"the number of perceptron of best perform model: \",n_perceptron\nprint \"the activation function of best perform model: \",activation_function\n\nmodel = Sequential([Dense(n_perceptron,input_dim=1,activation=activation_function),\n Dense(1,input_dim=n_perceptron,activation = None)\n ])\nmodel.compile(optimizer='adam',loss='mean_squared_error')\n\n#Begin training with best perform model\nconverged=0\ntolerance=0.001\nlast_score=0\nwhile converged==0:\n model.fit(x_train[:,0], x_train[:,1], epochs = 20, batch_size=64, verbose = 0)\n score = model.evaluate(x_test[:,0], x_test[:,1])\n print np.abs(score-last_score)\n print score\n if np.abs(score-last_score)<tolerance:\n converged=1\n last_score=score\n\nprint \"MSE of test dataset with best model: \",last_score\ny_predict=model.predict(x_test[:,0])\nplt.subplot(121)\nplt.plot(x_test[:,0], y_predict,'.r')\nplt.title('Predict Distribution')\nplt.xlabel('x1 value')\nplt.ylabel('predict x2 value')\nplt.subplot(122)\nplt.plot(x_test[:,0], x_test[:,1],'.r')\nplt.title('True Distribution')\nplt.xlabel('x1 value')\nplt.ylabel('x2 value')\nplt.show()\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
            response['error'] = 'failed to fetch data'
return Response(response)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DegreeCourseView(APIView):
<|reserved_special_token_0|>
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
            response['error'] = 'failed to fetch data'
return Response(response)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DegreeCourseView(APIView):
def get(self, request, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degreecourse_list = models.DegreeCourse.objects.all()
ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)
response['data'] = ser_obj.data
except Exception as e:
            response['error'] = 'failed to fetch data'
return Response(response)
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
            response['error'] = 'failed to fetch data'
return Response(response)
<|reserved_special_token_1|>
from app01 import models
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from api.serializers.course import DegreeCourseSerializer
class DegreeCourseView(APIView):
def get(self, request, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degreecourse_list = models.DegreeCourse.objects.all()
ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)
response['data'] = ser_obj.data
except Exception as e:
            response['error'] = 'failed to fetch data'
return Response(response)
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
            response['error'] = 'failed to fetch data'
return Response(response)
<|reserved_special_token_1|>
from app01 import models
from rest_framework.views import APIView
# from api.utils.response import BaseResponse
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from api.serializers.course import DegreeCourseSerializer
# Query all degree courses
class DegreeCourseView(APIView):
def get(self,request,*args,**kwargs):
response = {'code':100,'data':None,'error':None}
try:
            # Fetch the data from the database
degreecourse_list = models.DegreeCourse.objects.all()
            # Pagination
# page = PageNumberPagination()
# course_list = page.paginate_queryset(queryset,request,self)
            # Serialize the paginated result
ser_obj = DegreeCourseSerializer(degreecourse_list,many=True)
response['data'] = ser_obj.data
except Exception as e:
            response['error'] = 'failed to fetch data'
return Response(response)
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
            response['error'] = 'failed to fetch data'
return Response(response)
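
# A minimal sketch of how these views might be routed (hypothetical paths,
# not part of the original record):
# from django.urls import path
# urlpatterns = [
#     path('degree_course/', DegreeCourseView.as_view()),
#     path('degree_course/<int:pk>/', DegreeCourseDetailView.as_view()),
# ]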
|
flexible
|
{
"blob_id": "2b3f8b1ac4735785683c00f6e6ced85d201de53f",
"index": 8567,
"step-1": "<mask token>\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-2": "<mask token>\n\n\nclass DegreeCourseView(APIView):\n <mask token>\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-3": "<mask token>\n\n\nclass DegreeCourseView(APIView):\n\n def get(self, request, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degreecourse_list = models.DegreeCourse.objects.all()\n ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)\n response['data'] = ser_obj.data\n except Exception as e:\n response['error'] = '获取数据失败'\n return Response(response)\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-4": "from app01 import models\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom api.serializers.course import DegreeCourseSerializer\n\n\nclass DegreeCourseView(APIView):\n\n def get(self, request, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degreecourse_list = models.DegreeCourse.objects.all()\n ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)\n response['data'] = ser_obj.data\n except Exception as e:\n response['error'] = '获取数据失败'\n return Response(response)\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-5": "from app01 import models\nfrom rest_framework.views import APIView\n# from api.utils.response import BaseResponse\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom api.serializers.course import DegreeCourseSerializer\n\n\n# 查询所有学位课程\n\nclass DegreeCourseView(APIView):\n\n def get(self,request,*args,**kwargs):\n response = {'code':100,'data':None,'error':None}\n\n try:\n # 从数据库获取数据\n degreecourse_list = models.DegreeCourse.objects.all()\n\n # 分页\n # page = PageNumberPagination()\n # course_list = page.paginate_queryset(queryset,request,self)\n\n # 分页之后的结果执行序列化\n ser_obj = DegreeCourseSerializer(degreecourse_list,many=True)\n\n response['data'] = ser_obj.data\n except Exception as e:\n\n response['error'] = '获取数据失败'\n\n return Response(response)\n\n\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from fixate.reporting.csv import register_csv, unregister_csv
|
flexible
|
{
"blob_id": "c70db0fc9d98657e318ecab7eb8af60cc2b19a2c",
"index": 4145,
"step-1": "<mask token>\n",
"step-2": "from fixate.reporting.csv import register_csv, unregister_csv\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def intro():
print(
"""
####### ####### ####### # #######
# # #### # ## #### # #### ###### ## #
# # # # # # # # # # # # # # # #
# # # ##### # # # # ##### # # # ##### # ######
# # # # ###### # # # # # # #
# # # # # # # # # # # # # # # #
# # #### # # # #### # #### ###### ##### #####
How to play Tic-Tac-Toe 15:
    To win, you must get three numbers in a row, column, or diagonal that add up to 15! The first player enters odd numbers and the second player enters even numbers.
    Board Instructions: Tell the program which position you would like to play by entering the position number of
    the boxes as shown below. Players may only enter numbers from 1-9.
| |
1 | 2 | 3
_____|_____|_____
| |
4 | 5 | 6
_____|_____|_____
| |
7 | 8 | 9
| |
"""
)
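
# Note: Tic-Tac-Toe 15 is ordinary tic-tac-toe in disguise. Laying 1-9 out as
# the Lo Shu magic square
#     4 9 2
#     3 5 7
#     8 1 6
# makes every row, column, and diagonal sum to 15, so claiming a number here
# is the same as claiming the matching square in the classic game.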
<|reserved_special_token_0|>
def choose_who_first(player1, player2, player_order):
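    # A coin flip decides who goes first; the first name appended to
    # player_order plays the odd numbers and the second plays the evens.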
flip = random.randint(1, 2)
if flip == 1:
print('\n' + player1, 'goes first.', player1,
'can only play odd numbers and', player2,
'can only play even numbers from 1-9. ')
print()
player_order.append(player1)
player_order.append(player2)
return player1
elif flip == 2:
print('\n' + player2, 'goes first.', player2,
            'can only play odd numbers and', player1,
'can only play even numbers from 1-9. ')
print()
player_order.append(player2)
player_order.append(player1)
return player2
def make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2, player_order):
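    # Ask the current player for a board position (1-9) and then a number;
    # any invalid input recurses into this function so the same player retries.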
odd_moves = [1, 3, 5, 7, 9]
even_moves = [2, 4, 6, 8]
try:
if turn == player1:
print('\nIts your turn', player1 + ': ')
print()
p1_move_input = int(input('Move to which space? (1-9): '))
if player_order[0] == player1:
if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1
] == 0:
print()
p1_num_input = int(input('Enter an ODD NUMBER from 1-9: '))
if (p1_num_input in odd_moves and p1_num_input not in
unavailable_moves_p1):
the_board[p1_move_input - 1] = p1_num_input
unavailable_moves_p1.append(p1_num_input)
elif p1_num_input in unavailable_moves_p1:
print(
"""
INVALID INPUT, Please try again and enter a number that you haven't used. """
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
else:
print(
'\nINVALID INPUT, Please try again and enter an ODD number. '
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
elif p1_move_input < 1 or p1_move_input > 9:
print(
'\nINVALID INPUT, Please try again and enter a number between 1-9. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
else:
print(
'\nINVALID INPUT, Please try again and enter an unoccupied spot. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
elif player_order[1] == player1:
if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1
] == 0:
print()
p1_num_input = int(input('Enter a EVEN NUMBER from 1-9: '))
if (p1_num_input in even_moves and p1_num_input not in
unavailable_moves_p1):
the_board[p1_move_input - 1] = p1_num_input
unavailable_moves_p1.append(p1_num_input)
elif p1_num_input in unavailable_moves_p1:
print(
"""
INVALID INPUT, Please try again and enter a number that you haven't used. """
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
else:
print(
'\nINVALID INPUT, Please try again and enter a EVEN number. '
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
elif p1_move_input < 1 or p1_move_input > 9:
print(
'\nINVALID INPUT, Please try again and enter a number between 1-9. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
else:
print(
'\nINVALID INPUT, Please try again and enter an unoccupied spot. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
if turn == player2:
print('\nIts your turn', player2 + ': ')
print()
p2_move_input = int(input('Move to which space? (1-9): '))
if player_order[0] == player2:
if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1
] == 0:
print()
p2_num_input = int(input('Enter an ODD NUMBER from 1-9: '))
if (p2_num_input in odd_moves and p2_num_input not in
unavailable_moves_p2):
the_board[p2_move_input - 1] = p2_num_input
unavailable_moves_p2.append(p2_num_input)
elif p2_num_input in unavailable_moves_p2:
print(
"""
INVALID INPUT, Please try again and enter a number that you haven't used. """
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
else:
print(
'\nINVALID INPUT, Please try again and enter an ODD number. '
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
elif p2_move_input < 1 or p2_move_input > 9:
print(
'\nINVALID INPUT, Please try again and enter a number between 1-9. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
else:
print(
'\nINVALID INPUT, Please try again and enter an unoccupied spot. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
elif player_order[1] == player2:
if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1
] == 0:
print()
p2_num_input = int(input('Enter a EVEN NUMBER from 1-9: '))
if (p2_num_input in even_moves and p2_num_input not in
unavailable_moves_p2):
the_board[p2_move_input - 1] = p2_num_input
unavailable_moves_p2.append(p2_num_input)
elif p2_num_input in unavailable_moves_p2:
print(
"""
INVALID INPUT, Please try again and enter a number that you haven't used. """
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
else:
print(
'\nINVALID INPUT, Please try again and enter a EVEN number. '
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
unavailable_moves_p2, player_order)
elif p2_move_input < 1 or p2_move_input > 9:
print(
'\nINVALID INPUT, Please try again and enter a number between 1-9. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
else:
print(
                    '\nINVALID INPUT, Please try again and enter an unoccupied spot. '
)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2,
player_order)
except ValueError:
print('\nINVALID INPUT, Please try again and enter only in integers. ')
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2, player_order)
def check_game(board, winner):
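    # Scan all eight winning lines (three rows, three columns, two diagonals)
    # for three filled cells that sum to 15.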
is_game_over = ''
if board[0] + board[1] + board[2] == 15 and board[0] != 0 and board[1
] != 0 and board[2] != 0:
print_board(board)
print('\n' + str(board[0]) + ',', str(board[1]) + ',', 'and', str(
board[2]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
elif board[3] + board[4] + board[5] == 15 and board[3] != 0 and board[4
] != 0 and board[5] != 0:
print_board(board)
print('\n' + str(board[3]) + ',', str(board[4]) + ',', 'and', str(
board[5]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
elif board[6] + board[7] + board[8] == 15 and board[6] != 0 and board[7
] != 0 and board[8] != 0:
print_board(board)
print('\n' + str(board[6]) + ',', str(board[7]) + ',', 'and', str(
board[8]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
elif board[0] + board[3] + board[6] == 15 and board[0] != 0 and board[3
] != 0 and board[6] != 0:
print_board(board)
print('\n' + str(board[0]) + ',', str(board[3]) + ',', 'and', str(
board[6]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
elif board[1] + board[4] + board[7] == 15 and board[1] != 0 and board[4
] != 0 and board[7] != 0:
print_board(board)
print('\n' + str(board[1]) + ',', str(board[4]) + ',', 'and', str(
board[7]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
elif board[2] + board[5] + board[8] == 15 and board[2] != 0 and board[5
] != 0 and board[8] != 0:
print_board(board)
print('\n' + str(board[2]) + ',', str(board[5]) + ',', 'and', str(
board[8]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
elif board[6] + board[4] + board[2] == 15 and board[6] != 0 and board[4
] != 0 and board[2] != 0:
print_board(board)
print('\n' + str(board[6]) + ',', str(board[4]) + ',', 'and', str(
board[2]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
elif board[0] + board[4] + board[8] == 15 and board[0] != 0 and board[4
] != 0 and board[8] != 0:
print_board(board)
print('\n' + str(board[0]) + ',', str(board[4]) + ',', 'and', str(
board[8]), 'add up to 15! ')
print('\n' + winner, 'wins! ')
is_game_over = True
return is_game_over
def score(score1, score2, player1, player2):
print('\n\t------------------')
print('\t SCOREBOARD')
print('\t------------------')
print('\t' + ' ' + player1 + ':', score1)
print('\t' + ' ' + player2 + ':', score2)
print('\t------------------')
print()
def play_game(score1, score2, player1, player2):
unavailable_moves_p1 = []
unavailable_moves_p2 = []
player_order = []
the_board = [0, 0, 0, 0, 0, 0, 0, 0, 0]
count = 0
restart = ''
turn = choose_who_first(player1, player2, player_order)
input('Enter anything to start the round: ')
for i in range(10):
print_board(the_board)
make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2, player_order)
count += 1
if check_game(the_board, turn):
if turn == player1:
score1 += 1
elif turn == player2:
score2 += 1
break
if count == 9:
print("No numbers added up to 15, it's a DRAW! ")
break
if turn == player1:
turn = player2
else:
turn = player1
input('\nEnter anything to continue: ')
score(score1, score2, player1, player2)
    while True:  # keep asking until the player gives a valid yes/no answer
        restart = input('Do you want to play again? (y/n) ').lower()
if restart == 'y' or restart == 'yes':
print('\nLoading new round...')
play_game(score1, score2, player1, player2)
elif restart == 'n' or restart == 'no':
if score1 > score2:
print('\n' + player1, 'is the overall winner! Congratulations!'
)
elif score2 > score1:
print('\n' + player2, 'is the overall winner! Congratulations!'
)
elif score1 == score2:
print(
"\nBoth players have one the same amount of rounds. It's a draw! "
)
print('\nThanks for playing! ')
break
else:
print('\nPlease enter YES or NO ')
print()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def intro():
print(
"""
####### ####### ####### # #######
# # #### # ## #### # #### ###### ## #
# # # # # # # # # # # # # # # #
# # # ##### # # # # ##### # # # ##### # ######
# # # # ###### # # # # # # #
# # # # # # # # # # # # # # # #
# # #### # # # #### # #### ###### ##### #####
How to play Tic-Tac-Toe 15:
    To win, you must get three numbers in a row, column, or diagonal that add up to 15! The first player enters odd numbers and the second player enters even numbers.
    Board Instructions: Tell the program which position you would like to play by entering the position number of
    the boxes as shown below. Players may only enter numbers from 1-9.
| |
1 | 2 | 3
_____|_____|_____
| |
4 | 5 | 6
_____|_____|_____
| |
7 | 8 | 9
| |
"""
)
def print_board(board):
print('\n\t | |')
print('\t {} | {} | {}'.format(board[0], board[1], board[2]))
print('\t_____|_____|_____')
print('\t | |')
print('\t {} | {} | {}'.format(board[3], board[4], board[5]))
print('\t_____|_____|_____')
print('\t | |')
print('\t {} | {} | {}'.format(board[6], board[7], board[8]))
print('\t | |')
def choose_who_first(player1, player2, player_order):
flip = random.randint(1, 2)
if flip == 1:
print('\n' + player1, 'goes first.', player1,
'can only play odd numbers and', player2,
'can only play even numbers from 1-9. ')
print()
player_order.append(player1)
player_order.append(player2)
return player1
elif flip == 2:
print('\n' + player2, 'goes first.', player2,
            'can only play odd numbers and', player1,
'can only play even numbers from 1-9. ')
print()
player_order.append(player2)
player_order.append(player1)
return player2
def make_move_and_update(the_board, turn, player1, player2,
unavailable_moves_p1, unavailable_moves_p2, player_order):
odd_moves = [1, 3, 5, 7, 9]
even_moves = [2, 4, 6, 8]
try:
if turn == player1:
print('\nIts your turn', player1 + ': ')
print()
p1_move_input = int(input('Move to which space? (1-9): '))
if player_order[0] == player1:
if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1
] == 0:
print()
p1_num_input = int(input('Enter an ODD NUMBER from 1-9: '))
if (p1_num_input in odd_moves and p1_num_input not in
unavailable_moves_p1):
the_board[p1_move_input - 1] = p1_num_input
unavailable_moves_p1.append(p1_num_input)
elif p1_num_input in unavailable_moves_p1:
print(
"""
INVALID INPUT, Please try again and enter a number that you haven't used. """
)
make_move_and_update(the_board, turn, player1,
player2, unavailable_moves_p1,
# Name: Calvin Liew
# Date: 2021-01-29
# Purpose: Video game final project, Tic-Tac-Toe 15 by Calvin Liew.
import random
# Function that prints the title banner, the game rules and the board instructions.
def intro():
print("""\n####### ####### ####### # #######
# # #### # ## #### # #### ###### ## #
# # # # # # # # # # # # # # # #
# # # ##### # # # # ##### # # # ##### # ######
# # # # ###### # # # # # # #
# # # # # # # # # # # # # # # #
# # #### # # # #### # #### ###### ##### #####
How to play Tic-Tac-Toe 15:
To win, you must get three numbers in a row/column/diagonal that add up to 15! The first player enters odd numbers and the second player enters even numbers. 

Board Instructions: Tell the program where you would like to play by entering the position number of 
the box, as shown below. Players can only enter numbers from 1-9. 
| |
1 | 2 | 3
_____|_____|_____
| |
4 | 5 | 6
_____|_____|_____
| |
7 | 8 | 9
| |
""")
# Function that prints the tic-tac-toe board.
def print_board(board):
print("\n\t | |")
print("\t {} | {} | {}".format(board[0], board[1], board[2]))
print('\t_____|_____|_____')
print("\t | |")
print("\t {} | {} | {}".format(board[3], board[4], board[5]))
print('\t_____|_____|_____')
print("\t | |")
print("\t {} | {} | {}".format(board[6], board[7], board[8]))
print("\t | |")
# Function that chooses who goes first and assigns the player order.
def choose_who_first(player1, player2, player_order):
flip = random.randint(1, 2)
if flip == 1:
print("\n" + player1, "goes first.", player1, "can only play odd numbers and", player2,
"can only play even numbers from 1-9. ")
print()
player_order.append(player1)
player_order.append(player2)
return player1
elif flip == 2:
print("\n" + player2, "goes first.", player2, "can only play odd numbers and", name1,
"can only play even numbers from 1-9. ")
print()
player_order.append(player2)
player_order.append(player1)
return player2
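
# For repeatable rounds while testing, one could seed the RNG before calling
# choose_who_first, e.g. random.seed(42) fixes the coin flip above. This is an
# assumption about a testing workflow, not something the game itself does.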
# Function that makes the moves that the players provide while checking that each move is legal; on invalid input it re-prompts by calling itself again.
def make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order):
odd_moves = [1, 3, 5, 7, 9]
even_moves = [2, 4, 6, 8]
try:
if turn == player1:
print("\nIts your turn", player1 + ": ")
print()
p1_move_input = int(input("Move to which space? (1-9): "))
if player_order[0] == player1:
if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1] == 0:
print()
p1_num_input = int(input("Enter an ODD NUMBER from 1-9: "))
if p1_num_input in odd_moves and p1_num_input not in unavailable_moves_p1:
the_board[p1_move_input - 1] = p1_num_input
unavailable_moves_p1.append(p1_num_input)
elif p1_num_input in unavailable_moves_p1:
print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID INPUT, Please try again and enter an ODD number. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
elif p1_move_input < 1 or p1_move_input > 9:
print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID INPUT, Please try again and enter an unoccupied spot. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
elif player_order[1] == player1:
if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1] == 0:
print()
p1_num_input = int(input("Enter a EVEN NUMBER from 1-9: "))
if p1_num_input in even_moves and p1_num_input not in unavailable_moves_p1:
the_board[p1_move_input - 1] = p1_num_input
unavailable_moves_p1.append(p1_num_input)
elif p1_num_input in unavailable_moves_p1:
print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID INPUT, Please try again and enter a EVEN number. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
elif p1_move_input < 1 or p1_move_input > 9:
print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID INPUT, Please try again and enter an unoccupied spot. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
if turn == player2:
print("\nIts your turn", player2 + ": ")
print()
p2_move_input = int(input("Move to which space? (1-9): "))
if player_order[0] == player2:
if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1] == 0:
print()
p2_num_input = int(input("Enter an ODD NUMBER from 1-9: "))
if p2_num_input in odd_moves and p2_num_input not in unavailable_moves_p2:
the_board[p2_move_input - 1] = p2_num_input
unavailable_moves_p2.append(p2_num_input)
elif p2_num_input in unavailable_moves_p2:
print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID INPUT, Please try again and enter an ODD number. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
elif p2_move_input < 1 or p2_move_input > 9:
print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID INPUT, Please try again and enter an unoccupied spot. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
elif player_order[1] == player2:
if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1] == 0:
print()
p2_num_input = int(input("Enter a EVEN NUMBER from 1-9: "))
if p2_num_input in even_moves and p2_num_input not in unavailable_moves_p2:
the_board[p2_move_input - 1] = p2_num_input
unavailable_moves_p2.append(p2_num_input)
elif p2_num_input in unavailable_moves_p2:
print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID INPUT, Please try again and enter a EVEN number. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
elif p2_move_input < 1 or p2_move_input > 9:
print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
else:
print("\nINVALID, Please try again and enter an unoccupied spot. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
except ValueError:
print("\nINVALID INPUT, Please try again and enter only in integers. ")
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
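
# Invalid input above is handled by re-entering make_move_and_update recursively,
# so a very long streak of bad entries could in principle hit Python's recursion
# limit. A loop-based prompt helper avoids that; prompt_int below is a sketch with
# a hypothetical name and is not used by the game code in this file.
def prompt_int(message, low, high):
    # Keep asking until the user types an integer in the range [low, high].
    while True:
        try:
            value = int(input(message))
        except ValueError:
            print("\nINVALID INPUT, Please try again and enter integers only. ")
            continue
        if low <= value <= high:
            return value
        print("\nINVALID INPUT, Please try again and enter a number between {}-{}. ".format(low, high))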
# Function that checks whether any filled row/column/diagonal adds up to 15. Returns True when someone has won, which ends the round.
def check_game(board, winner):
is_game_over = ""
if board[0] + board[1] + board[2] == 15 and board[0] != 0 and board[1] != 0 and board[2] != 0:
print_board(board)
print("\n"+str(board[0])+",", str(board[1])+",", "and", str(board[2]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
elif board[3] + board[4] + board[5] == 15 and board[3] != 0 and board[4] != 0 and board[5] != 0:
print_board(board)
print("\n"+str(board[3])+",", str(board[4])+",", "and", str(board[5]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
elif board[6] + board[7] + board[8] == 15 and board[6] != 0 and board[7] != 0 and board[8] != 0:
print_board(board)
print("\n"+str(board[6])+",", str(board[7])+",", "and", str(board[8]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
elif board[0] + board[3] + board[6] == 15 and board[0] != 0 and board[3] != 0 and board[6] != 0:
print_board(board)
print("\n"+str(board[0])+",", str(board[3])+",", "and", str(board[6]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
elif board[1] + board[4] + board[7] == 15 and board[1] != 0 and board[4] != 0 and board[7] != 0:
print_board(board)
print("\n"+str(board[1])+",", str(board[4])+",", "and", str(board[7]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
elif board[2] + board[5] + board[8] == 15 and board[2] != 0 and board[5] != 0 and board[8] != 0:
print_board(board)
print("\n"+str(board[2])+",", str(board[5])+",", "and", str(board[8]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
elif board[6] + board[4] + board[2] == 15 and board[6] != 0 and board[4] != 0 and board[2] != 0:
print_board(board)
print("\n"+str(board[6])+",", str(board[4])+",", "and", str(board[2]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
elif board[0] + board[4] + board[8] == 15 and board[0] != 0 and board[4] != 0 and board[8] != 0:
print_board(board)
print("\n"+str(board[0])+",", str(board[4])+",", "and", str(board[8]), "add up to 15! ")
print("\n"+winner, "wins! ")
is_game_over = True
return is_game_over
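
# The eight hard-coded win checks in check_game could equivalently be driven by a
# table of winning triples. A compact sketch follows; WINNING_TRIPLES and
# check_game_compact are illustrative names and are not used elsewhere in this file.
WINNING_TRIPLES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),  # rows
                   (0, 3, 6), (1, 4, 7), (2, 5, 8),  # columns
                   (0, 4, 8), (2, 4, 6)]             # diagonals

def check_game_compact(board, winner):
    for a, b, c in WINNING_TRIPLES:
        if 0 not in (board[a], board[b], board[c]) and board[a] + board[b] + board[c] == 15:
            print_board(board)
            print("\n{}, {}, and {} add up to 15! ".format(board[a], board[b], board[c]))
            print("\n" + winner, "wins! ")
            return True
    return False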
# Function that prints the scoreboard with both players' scores; called after each round ends.
def score(score1, score2, player1, player2):
print("\n\t------------------")
print("\t SCOREBOARD")
print("\t------------------")
print("\t" + " " + player1 + ":", score1)
print("\t" + " " + player2 + ":", score2)
print("\t------------------")
print()
# Function where most of the game takes place: it drives a round by calling choose_who_first, make_move_and_update, check_game and score.
# It keeps track of the player order, the board, each player's used numbers and the move count; the round ends in a draw when count reaches 9. At the end of the round, it asks the users if they want to play again.
def play_game(score1, score2, player1, player2):
unavailable_moves_p1 = []
unavailable_moves_p2 = []
player_order = []
the_board = [0, 0, 0, 0, 0, 0, 0, 0, 0]
count = 0
restart = ""
turn = choose_who_first(player1, player2, player_order)
input("Enter anything to start the round: ")
for i in range(10):
print_board(the_board)
make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
count += 1
if check_game(the_board, turn):
if turn == player1:
score1 += 1
elif turn == player2:
score2 += 1
break
if count == 9:
print("No numbers added up to 15, it's a DRAW! ")
break
if turn == player1:
turn = player2
else:
turn = player1
input("\nEnter anything to continue: ")
score(score1, score2, player1, player2)
# Asks if the players want to restart. If yes, it calls the play_game function. If no, it ends the game and congratulates the overall winner.
while restart != "yes" or restart != "y" or restart != "n" or restart != "no":
restart = input("Do want to play Again? (y/n) ").lower()
if restart == "y" or restart == "yes":
print("\nLoading new round...")
play_game(score1, score2, player1, player2)
elif restart == "n" or restart == "no":
if score1 > score2:
print("\n"+player1, "is the overall winner! Congratulations!")
elif score2 > score1:
print("\n"+player2, "is the overall winner! Congratulations!")
elif score1 == score2:
print("\nBoth players have one the same amount of rounds. It's a draw! ")
print("\nThanks for playing! ")
break
else:
print("\nPlease enter YES or NO ")
print()
# Entry point: shows the instructions, collects the players' usernames and starts the game by calling play_game.
if __name__ == "__main__":
intro()
input("Enter anything to continue: ")
print("\nEnter usernames: ")
name1 = input("\nPlayer 1, Enter your name: ").title()
name2 = input("\nPlayer 2, Enter your name: ").title()
p1_score = 0
p2_score = 0
play_game(p1_score, p2_score, name1, name2)
"step-3": "<mask token>\n\n\ndef intro():\n print(\n \"\"\"\n####### ####### ####### # ####### \n # # #### # ## #### # #### ###### ## # \n # # # # # # # # # # # # # # # # \n # # # ##### # # # # ##### # # # ##### # ###### \n # # # # ###### # # # # # # # \n # # # # # # # # # # # # # # # # \n # # #### # # # #### # #### ###### ##### ##### \n\nHow to play Tic-Tac-Toe 15: \n\nTo win, you must get three numbers in a row/column/diagonal that add up to the sum of 15! The first player enters odd numbers and the second player enters even numbers. \n\nBoard Instructions: Tell the program the position of which you would like to enter by entering the number position of \nthe boxes as shown below. Players can can only enter from numbers from 1-9. \n\n | |\n\t 1 | 2 | 3\n\t_____|_____|_____\n\t | |\n\t 4 | 5 | 6\n\t_____|_____|_____\n\t | |\n\t 7 | 8 | 9\n\t | |\n \"\"\"\n )\n\n\ndef print_board(board):\n print('\\n\\t | |')\n print('\\t {} | {} | {}'.format(board[0], board[1], board[2]))\n print('\\t_____|_____|_____')\n print('\\t | |')\n print('\\t {} | {} | {}'.format(board[3], board[4], board[5]))\n print('\\t_____|_____|_____')\n print('\\t | |')\n print('\\t {} | {} | {}'.format(board[6], board[7], board[8]))\n print('\\t | |')\n\n\ndef choose_who_first(player1, player2, player_order):\n flip = random.randint(1, 2)\n if flip == 1:\n print('\\n' + player1, 'goes first.', player1,\n 'can only play odd numbers and', player2,\n 'can only play even numbers from 1-9. ')\n print()\n player_order.append(player1)\n player_order.append(player2)\n return player1\n elif flip == 2:\n print('\\n' + player2, 'goes first.', player2,\n 'can only play odd numbers and', name1,\n 'can only play even numbers from 1-9. ')\n print()\n player_order.append(player2)\n player_order.append(player1)\n return player2\n\n\ndef make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2, player_order):\n odd_moves = [1, 3, 5, 7, 9]\n even_moves = [2, 4, 6, 8]\n try:\n if turn == player1:\n print('\\nIts your turn', player1 + ': ')\n print()\n p1_move_input = int(input('Move to which space? (1-9): '))\n if player_order[0] == player1:\n if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1\n ] == 0:\n print()\n p1_num_input = int(input('Enter an ODD NUMBER from 1-9: '))\n if (p1_num_input in odd_moves and p1_num_input not in\n unavailable_moves_p1):\n the_board[p1_move_input - 1] = p1_num_input\n unavailable_moves_p1.append(p1_num_input)\n elif p1_num_input in unavailable_moves_p1:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an ODD number. '\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p1_move_input < 1 or p1_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an unoccupied spot. 
'\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n elif player_order[1] == player1:\n if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1\n ] == 0:\n print()\n p1_num_input = int(input('Enter a EVEN NUMBER from 1-9: '))\n if (p1_num_input in even_moves and p1_num_input not in\n unavailable_moves_p1):\n the_board[p1_move_input - 1] = p1_num_input\n unavailable_moves_p1.append(p1_num_input)\n elif p1_num_input in unavailable_moves_p1:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter a EVEN number. '\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p1_move_input < 1 or p1_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an unoccupied spot. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n if turn == player2:\n print('\\nIts your turn', player2 + ': ')\n print()\n p2_move_input = int(input('Move to which space? (1-9): '))\n if player_order[0] == player2:\n if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1\n ] == 0:\n print()\n p2_num_input = int(input('Enter an ODD NUMBER from 1-9: '))\n if (p2_num_input in odd_moves and p2_num_input not in\n unavailable_moves_p2):\n the_board[p2_move_input - 1] = p2_num_input\n unavailable_moves_p2.append(p2_num_input)\n elif p2_num_input in unavailable_moves_p2:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an ODD number. '\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p2_move_input < 1 or p2_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an unoccupied spot. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n elif player_order[1] == player2:\n if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1\n ] == 0:\n print()\n p2_num_input = int(input('Enter a EVEN NUMBER from 1-9: '))\n if (p2_num_input in even_moves and p2_num_input not in\n unavailable_moves_p2):\n the_board[p2_move_input - 1] = p2_num_input\n unavailable_moves_p2.append(p2_num_input)\n elif p2_num_input in unavailable_moves_p2:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter a EVEN number. 
'\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p2_move_input < 1 or p2_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID, Please try again and enter an unoccupied spot. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n except ValueError:\n print('\\nINVALID INPUT, Please try again and enter only in integers. ')\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2, player_order)\n\n\ndef check_game(board, winner):\n is_game_over = ''\n if board[0] + board[1] + board[2] == 15 and board[0] != 0 and board[1\n ] != 0 and board[2] != 0:\n print_board(board)\n print('\\n' + str(board[0]) + ',', str(board[1]) + ',', 'and', str(\n board[2]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[3] + board[4] + board[5] == 15 and board[3] != 0 and board[4\n ] != 0 and board[5] != 0:\n print_board(board)\n print('\\n' + str(board[3]) + ',', str(board[4]) + ',', 'and', str(\n board[5]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[6] + board[7] + board[8] == 15 and board[6] != 0 and board[7\n ] != 0 and board[8] != 0:\n print_board(board)\n print('\\n' + str(board[6]) + ',', str(board[7]) + ',', 'and', str(\n board[8]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[0] + board[3] + board[6] == 15 and board[0] != 0 and board[3\n ] != 0 and board[6] != 0:\n print_board(board)\n print('\\n' + str(board[0]) + ',', str(board[3]) + ',', 'and', str(\n board[6]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[1] + board[4] + board[7] == 15 and board[1] != 0 and board[4\n ] != 0 and board[7] != 0:\n print_board(board)\n print('\\n' + str(board[1]) + ',', str(board[4]) + ',', 'and', str(\n board[7]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[2] + board[5] + board[8] == 15 and board[2] != 0 and board[5\n ] != 0 and board[8] != 0:\n print_board(board)\n print('\\n' + str(board[2]) + ',', str(board[5]) + ',', 'and', str(\n board[8]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[6] + board[4] + board[2] == 15 and board[6] != 0 and board[4\n ] != 0 and board[2] != 0:\n print_board(board)\n print('\\n' + str(board[6]) + ',', str(board[4]) + ',', 'and', str(\n board[2]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[0] + board[4] + board[8] == 15 and board[0] != 0 and board[4\n ] != 0 and board[8] != 0:\n print_board(board)\n print('\\n' + str(board[0]) + ',', str(board[4]) + ',', 'and', str(\n board[8]), 'add up to 15! ')\n print('\\n' + winner, 'wins! 
')\n is_game_over = True\n return is_game_over\n\n\ndef score(score1, score2, player1, player2):\n print('\\n\\t------------------')\n print('\\t SCOREBOARD')\n print('\\t------------------')\n print('\\t' + ' ' + player1 + ':', score1)\n print('\\t' + ' ' + player2 + ':', score2)\n print('\\t------------------')\n print()\n\n\ndef play_game(score1, score2, player1, player2):\n unavailable_moves_p1 = []\n unavailable_moves_p2 = []\n player_order = []\n the_board = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n count = 0\n restart = ''\n turn = choose_who_first(player1, player2, player_order)\n input('Enter anything to start the round: ')\n for i in range(10):\n print_board(the_board)\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2, player_order)\n count += 1\n if check_game(the_board, turn):\n if turn == player1:\n score1 += 1\n elif turn == player2:\n score2 += 1\n break\n if count == 9:\n print(\"No numbers added up to 15, it's a DRAW! \")\n break\n if turn == player1:\n turn = player2\n else:\n turn = player1\n input('\\nEnter anything to continue: ')\n score(score1, score2, player1, player2)\n while (restart != 'yes' or restart != 'y' or restart != 'n' or restart !=\n 'no'):\n restart = input('Do want to play Again? (y/n) ').lower()\n if restart == 'y' or restart == 'yes':\n print('\\nLoading new round...')\n play_game(score1, score2, player1, player2)\n elif restart == 'n' or restart == 'no':\n if score1 > score2:\n print('\\n' + player1, 'is the overall winner! Congratulations!'\n )\n elif score2 > score1:\n print('\\n' + player2, 'is the overall winner! Congratulations!'\n )\n elif score1 == score2:\n print(\n \"\\nBoth players have one the same amount of rounds. It's a draw! \"\n )\n print('\\nThanks for playing! ')\n break\n else:\n print('\\nPlease enter YES or NO ')\n print()\n\n\nif __name__ == '__main__':\n intro()\n input('Enter anything to continue: ')\n print('\\nEnter usernames: ')\n name1 = input('\\nPlayer 1, Enter your name: ').title()\n name2 = input('\\nPlayer 2, Enter your name: ').title()\n p1_score = 0\n p2_score = 0\n play_game(p1_score, p2_score, name1, name2)\n",
"step-4": "import random\n\n\ndef intro():\n print(\n \"\"\"\n####### ####### ####### # ####### \n # # #### # ## #### # #### ###### ## # \n # # # # # # # # # # # # # # # # \n # # # ##### # # # # ##### # # # ##### # ###### \n # # # # ###### # # # # # # # \n # # # # # # # # # # # # # # # # \n # # #### # # # #### # #### ###### ##### ##### \n\nHow to play Tic-Tac-Toe 15: \n\nTo win, you must get three numbers in a row/column/diagonal that add up to the sum of 15! The first player enters odd numbers and the second player enters even numbers. \n\nBoard Instructions: Tell the program the position of which you would like to enter by entering the number position of \nthe boxes as shown below. Players can can only enter from numbers from 1-9. \n\n | |\n\t 1 | 2 | 3\n\t_____|_____|_____\n\t | |\n\t 4 | 5 | 6\n\t_____|_____|_____\n\t | |\n\t 7 | 8 | 9\n\t | |\n \"\"\"\n )\n\n\ndef print_board(board):\n print('\\n\\t | |')\n print('\\t {} | {} | {}'.format(board[0], board[1], board[2]))\n print('\\t_____|_____|_____')\n print('\\t | |')\n print('\\t {} | {} | {}'.format(board[3], board[4], board[5]))\n print('\\t_____|_____|_____')\n print('\\t | |')\n print('\\t {} | {} | {}'.format(board[6], board[7], board[8]))\n print('\\t | |')\n\n\ndef choose_who_first(player1, player2, player_order):\n flip = random.randint(1, 2)\n if flip == 1:\n print('\\n' + player1, 'goes first.', player1,\n 'can only play odd numbers and', player2,\n 'can only play even numbers from 1-9. ')\n print()\n player_order.append(player1)\n player_order.append(player2)\n return player1\n elif flip == 2:\n print('\\n' + player2, 'goes first.', player2,\n 'can only play odd numbers and', name1,\n 'can only play even numbers from 1-9. ')\n print()\n player_order.append(player2)\n player_order.append(player1)\n return player2\n\n\ndef make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2, player_order):\n odd_moves = [1, 3, 5, 7, 9]\n even_moves = [2, 4, 6, 8]\n try:\n if turn == player1:\n print('\\nIts your turn', player1 + ': ')\n print()\n p1_move_input = int(input('Move to which space? (1-9): '))\n if player_order[0] == player1:\n if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1\n ] == 0:\n print()\n p1_num_input = int(input('Enter an ODD NUMBER from 1-9: '))\n if (p1_num_input in odd_moves and p1_num_input not in\n unavailable_moves_p1):\n the_board[p1_move_input - 1] = p1_num_input\n unavailable_moves_p1.append(p1_num_input)\n elif p1_num_input in unavailable_moves_p1:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an ODD number. '\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p1_move_input < 1 or p1_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an unoccupied spot. 
'\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n elif player_order[1] == player1:\n if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1\n ] == 0:\n print()\n p1_num_input = int(input('Enter a EVEN NUMBER from 1-9: '))\n if (p1_num_input in even_moves and p1_num_input not in\n unavailable_moves_p1):\n the_board[p1_move_input - 1] = p1_num_input\n unavailable_moves_p1.append(p1_num_input)\n elif p1_num_input in unavailable_moves_p1:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter a EVEN number. '\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p1_move_input < 1 or p1_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an unoccupied spot. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n if turn == player2:\n print('\\nIts your turn', player2 + ': ')\n print()\n p2_move_input = int(input('Move to which space? (1-9): '))\n if player_order[0] == player2:\n if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1\n ] == 0:\n print()\n p2_num_input = int(input('Enter an ODD NUMBER from 1-9: '))\n if (p2_num_input in odd_moves and p2_num_input not in\n unavailable_moves_p2):\n the_board[p2_move_input - 1] = p2_num_input\n unavailable_moves_p2.append(p2_num_input)\n elif p2_num_input in unavailable_moves_p2:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an ODD number. '\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p2_move_input < 1 or p2_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter an unoccupied spot. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n elif player_order[1] == player2:\n if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1\n ] == 0:\n print()\n p2_num_input = int(input('Enter a EVEN NUMBER from 1-9: '))\n if (p2_num_input in even_moves and p2_num_input not in\n unavailable_moves_p2):\n the_board[p2_move_input - 1] = p2_num_input\n unavailable_moves_p2.append(p2_num_input)\n elif p2_num_input in unavailable_moves_p2:\n print(\n \"\"\"\nINVALID INPUT, Please try again and enter a number that you haven't used. \"\"\"\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n else:\n print(\n '\\nINVALID INPUT, Please try again and enter a EVEN number. 
'\n )\n make_move_and_update(the_board, turn, player1,\n player2, unavailable_moves_p1,\n unavailable_moves_p2, player_order)\n elif p2_move_input < 1 or p2_move_input > 9:\n print(\n '\\nINVALID INPUT, Please try again and enter a number between 1-9. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n else:\n print(\n '\\nINVALID, Please try again and enter an unoccupied spot. '\n )\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2,\n player_order)\n except ValueError:\n print('\\nINVALID INPUT, Please try again and enter only in integers. ')\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2, player_order)\n\n\ndef check_game(board, winner):\n is_game_over = ''\n if board[0] + board[1] + board[2] == 15 and board[0] != 0 and board[1\n ] != 0 and board[2] != 0:\n print_board(board)\n print('\\n' + str(board[0]) + ',', str(board[1]) + ',', 'and', str(\n board[2]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[3] + board[4] + board[5] == 15 and board[3] != 0 and board[4\n ] != 0 and board[5] != 0:\n print_board(board)\n print('\\n' + str(board[3]) + ',', str(board[4]) + ',', 'and', str(\n board[5]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[6] + board[7] + board[8] == 15 and board[6] != 0 and board[7\n ] != 0 and board[8] != 0:\n print_board(board)\n print('\\n' + str(board[6]) + ',', str(board[7]) + ',', 'and', str(\n board[8]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[0] + board[3] + board[6] == 15 and board[0] != 0 and board[3\n ] != 0 and board[6] != 0:\n print_board(board)\n print('\\n' + str(board[0]) + ',', str(board[3]) + ',', 'and', str(\n board[6]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[1] + board[4] + board[7] == 15 and board[1] != 0 and board[4\n ] != 0 and board[7] != 0:\n print_board(board)\n print('\\n' + str(board[1]) + ',', str(board[4]) + ',', 'and', str(\n board[7]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[2] + board[5] + board[8] == 15 and board[2] != 0 and board[5\n ] != 0 and board[8] != 0:\n print_board(board)\n print('\\n' + str(board[2]) + ',', str(board[5]) + ',', 'and', str(\n board[8]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[6] + board[4] + board[2] == 15 and board[6] != 0 and board[4\n ] != 0 and board[2] != 0:\n print_board(board)\n print('\\n' + str(board[6]) + ',', str(board[4]) + ',', 'and', str(\n board[2]), 'add up to 15! ')\n print('\\n' + winner, 'wins! ')\n is_game_over = True\n elif board[0] + board[4] + board[8] == 15 and board[0] != 0 and board[4\n ] != 0 and board[8] != 0:\n print_board(board)\n print('\\n' + str(board[0]) + ',', str(board[4]) + ',', 'and', str(\n board[8]), 'add up to 15! ')\n print('\\n' + winner, 'wins! 
')\n is_game_over = True\n return is_game_over\n\n\ndef score(score1, score2, player1, player2):\n print('\\n\\t------------------')\n print('\\t SCOREBOARD')\n print('\\t------------------')\n print('\\t' + ' ' + player1 + ':', score1)\n print('\\t' + ' ' + player2 + ':', score2)\n print('\\t------------------')\n print()\n\n\ndef play_game(score1, score2, player1, player2):\n unavailable_moves_p1 = []\n unavailable_moves_p2 = []\n player_order = []\n the_board = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n count = 0\n restart = ''\n turn = choose_who_first(player1, player2, player_order)\n input('Enter anything to start the round: ')\n for i in range(10):\n print_board(the_board)\n make_move_and_update(the_board, turn, player1, player2,\n unavailable_moves_p1, unavailable_moves_p2, player_order)\n count += 1\n if check_game(the_board, turn):\n if turn == player1:\n score1 += 1\n elif turn == player2:\n score2 += 1\n break\n if count == 9:\n print(\"No numbers added up to 15, it's a DRAW! \")\n break\n if turn == player1:\n turn = player2\n else:\n turn = player1\n input('\\nEnter anything to continue: ')\n score(score1, score2, player1, player2)\n while (restart != 'yes' or restart != 'y' or restart != 'n' or restart !=\n 'no'):\n restart = input('Do want to play Again? (y/n) ').lower()\n if restart == 'y' or restart == 'yes':\n print('\\nLoading new round...')\n play_game(score1, score2, player1, player2)\n elif restart == 'n' or restart == 'no':\n if score1 > score2:\n print('\\n' + player1, 'is the overall winner! Congratulations!'\n )\n elif score2 > score1:\n print('\\n' + player2, 'is the overall winner! Congratulations!'\n )\n elif score1 == score2:\n print(\n \"\\nBoth players have one the same amount of rounds. It's a draw! \"\n )\n print('\\nThanks for playing! ')\n break\n else:\n print('\\nPlease enter YES or NO ')\n print()\n\n\nif __name__ == '__main__':\n intro()\n input('Enter anything to continue: ')\n print('\\nEnter usernames: ')\n name1 = input('\\nPlayer 1, Enter your name: ').title()\n name2 = input('\\nPlayer 2, Enter your name: ').title()\n p1_score = 0\n p2_score = 0\n play_game(p1_score, p2_score, name1, name2)\n",
"step-5": "# Name: Calvin Liew\r\n# Date: 2021-01-29\r\n# Purpose: Video game final project, Tic-Tac-Toe 15 by Calvin Liew.\r\n\r\nimport random\r\n\r\n\r\n# Function that reminds the users of the game rules and other instructions.\r\n\r\ndef intro():\r\n print(\"\"\"\\n####### ####### ####### # ####### \r\n # # #### # ## #### # #### ###### ## # \r\n # # # # # # # # # # # # # # # # \r\n # # # ##### # # # # ##### # # # ##### # ###### \r\n # # # # ###### # # # # # # # \r\n # # # # # # # # # # # # # # # # \r\n # # #### # # # #### # #### ###### ##### ##### \r\n\r\nHow to play Tic-Tac-Toe 15: \r\n\r\nTo win, you must get three numbers in a row/column/diagonal that add up to the sum of 15! The first player enters odd numbers and the second player enters even numbers. \r\n\r\nBoard Instructions: Tell the program the position of which you would like to enter by entering the number position of \r\nthe boxes as shown below. Players can can only enter from numbers from 1-9. \r\n\r\n | |\r\n\t 1 | 2 | 3\r\n\t_____|_____|_____\r\n\t | |\r\n\t 4 | 5 | 6\r\n\t_____|_____|_____\r\n\t | |\r\n\t 7 | 8 | 9\r\n\t | |\r\n \"\"\")\r\n\r\n\r\n# Function that prints the tic-tac-toe board.\r\n\r\ndef print_board(board):\r\n print(\"\\n\\t | |\")\r\n print(\"\\t {} | {} | {}\".format(board[0], board[1], board[2]))\r\n print('\\t_____|_____|_____')\r\n\r\n print(\"\\t | |\")\r\n print(\"\\t {} | {} | {}\".format(board[3], board[4], board[5]))\r\n print('\\t_____|_____|_____')\r\n\r\n print(\"\\t | |\")\r\n\r\n print(\"\\t {} | {} | {}\".format(board[6], board[7], board[8]))\r\n print(\"\\t | |\")\r\n\r\n\r\n# Function that chooses who goes first and assigns the player order.\r\n\r\ndef choose_who_first(player1, player2, player_order):\r\n flip = random.randint(1, 2)\r\n\r\n if flip == 1:\r\n print(\"\\n\" + player1, \"goes first.\", player1, \"can only play odd numbers and\", player2,\r\n \"can only play even numbers from 1-9. \")\r\n print()\r\n player_order.append(player1)\r\n player_order.append(player2)\r\n return player1\r\n\r\n elif flip == 2:\r\n print(\"\\n\" + player2, \"goes first.\", player2, \"can only play odd numbers and\", name1,\r\n \"can only play even numbers from 1-9. \")\r\n print()\r\n player_order.append(player2)\r\n player_order.append(player1)\r\n return player2\r\n\r\n\r\n# Function that calls the print_board() function as well as makes the moves that the players provide while checking if the moves are legal or not.\r\n\r\ndef make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order):\r\n odd_moves = [1, 3, 5, 7, 9]\r\n even_moves = [2, 4, 6, 8]\r\n\r\n try:\r\n if turn == player1:\r\n\r\n print(\"\\nIts your turn\", player1 + \": \")\r\n print()\r\n p1_move_input = int(input(\"Move to which space? (1-9): \"))\r\n\r\n if player_order[0] == player1:\r\n if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1] == 0:\r\n print()\r\n p1_num_input = int(input(\"Enter an ODD NUMBER from 1-9: \"))\r\n\r\n if p1_num_input in odd_moves and p1_num_input not in unavailable_moves_p1:\r\n the_board[p1_move_input - 1] = p1_num_input\r\n unavailable_moves_p1.append(p1_num_input)\r\n elif p1_num_input in unavailable_moves_p1:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number that you haven't used. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID INPUT, Please try again and enter an ODD number. 
\")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n elif p1_move_input < 1 or p1_move_input > 9:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number between 1-9. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID INPUT, Please try again and enter an unoccupied spot. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n\r\n elif player_order[1] == player1:\r\n if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1] == 0:\r\n print()\r\n p1_num_input = int(input(\"Enter a EVEN NUMBER from 1-9: \"))\r\n\r\n if p1_num_input in even_moves and p1_num_input not in unavailable_moves_p1:\r\n the_board[p1_move_input - 1] = p1_num_input\r\n unavailable_moves_p1.append(p1_num_input)\r\n elif p1_num_input in unavailable_moves_p1:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number that you haven't used. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID INPUT, Please try again and enter a EVEN number. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n elif p1_move_input < 1 or p1_move_input > 9:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number between 1-9. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID INPUT, Please try again and enter an unoccupied spot. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n\r\n if turn == player2:\r\n\r\n print(\"\\nIts your turn\", player2 + \": \")\r\n print()\r\n p2_move_input = int(input(\"Move to which space? (1-9): \"))\r\n\r\n if player_order[0] == player2:\r\n if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1] == 0:\r\n print()\r\n p2_num_input = int(input(\"Enter an ODD NUMBER from 1-9: \"))\r\n\r\n if p2_num_input in odd_moves and p2_num_input not in unavailable_moves_p2:\r\n the_board[p2_move_input - 1] = p2_num_input\r\n unavailable_moves_p2.append(p2_num_input)\r\n elif p2_num_input in unavailable_moves_p2:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number that you haven't used. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID INPUT, Please try again and enter an ODD number. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n elif p2_move_input < 1 or p2_move_input > 9:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number between 1-9. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID INPUT, Please try again and enter an unoccupied spot. 
\")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n\r\n elif player_order[1] == player2:\r\n if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1] == 0:\r\n print()\r\n p2_num_input = int(input(\"Enter a EVEN NUMBER from 1-9: \"))\r\n\r\n if p2_num_input in even_moves and p2_num_input not in unavailable_moves_p2:\r\n the_board[p2_move_input - 1] = p2_num_input\r\n unavailable_moves_p2.append(p2_num_input)\r\n elif p2_num_input in unavailable_moves_p2:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number that you haven't used. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID INPUT, Please try again and enter a EVEN number. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n elif p2_move_input < 1 or p2_move_input > 9:\r\n print(\"\\nINVALID INPUT, Please try again and enter a number between 1-9. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n else:\r\n print(\"\\nINVALID, Please try again and enter an unoccupied spot. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n\r\n except ValueError:\r\n print(\"\\nINVALID INPUT, Please try again and enter only in integers. \")\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n\r\n\r\n# Function that checks if any three numbers in a row/column/diagonal add up to 15. If there is, the function returns is_game_over and the game ends.\r\n\r\ndef check_game(board, winner):\r\n is_game_over = \"\"\r\n\r\n if board[0] + board[1] + board[2] == 15 and board[0] != 0 and board[1] != 0 and board[2] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[0])+\",\", str(board[1])+\",\", \"and\", str(board[2]), \"add up to 15! \")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n elif board[3] + board[4] + board[5] == 15 and board[3] != 0 and board[4] != 0 and board[5] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[3])+\",\", str(board[4])+\",\", \"and\", str(board[5]), \"add up to 15! \")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n elif board[6] + board[7] + board[8] == 15 and board[6] != 0 and board[7] != 0 and board[8] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[6])+\",\", str(board[7])+\",\", \"and\", str(board[8]), \"add up to 15! \")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n elif board[0] + board[3] + board[6] == 15 and board[0] != 0 and board[3] != 0 and board[6] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[0])+\",\", str(board[3])+\",\", \"and\", str(board[6]), \"add up to 15! \")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n elif board[1] + board[4] + board[7] == 15 and board[1] != 0 and board[4] != 0 and board[7] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[1])+\",\", str(board[4])+\",\", \"and\", str(board[7]), \"add up to 15! \")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n elif board[2] + board[5] + board[8] == 15 and board[2] != 0 and board[5] != 0 and board[8] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[2])+\",\", str(board[5])+\",\", \"and\", str(board[8]), \"add up to 15! 
\")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n elif board[6] + board[4] + board[2] == 15 and board[6] != 0 and board[4] != 0 and board[2] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[6])+\",\", str(board[4])+\",\", \"and\", str(board[2]), \"add up to 15! \")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n elif board[0] + board[4] + board[8] == 15 and board[0] != 0 and board[4] != 0 and board[8] != 0:\r\n print_board(board)\r\n print(\"\\n\"+str(board[0])+\",\", str(board[4])+\",\", \"and\", str(board[8]), \"add up to 15! \")\r\n print(\"\\n\"+winner, \"wins! \")\r\n is_game_over = True\r\n\r\n return is_game_over\r\n\r\n\r\n# Function that prints the scoreboard and the scores of the two players. Prints after a round has ended.\r\n\r\ndef score(score1, score2, player1, player2):\r\n print(\"\\n\\t------------------\")\r\n print(\"\\t SCOREBOARD\")\r\n print(\"\\t------------------\")\r\n print(\"\\t\" + \" \" + player1 + \":\", score1)\r\n print(\"\\t\" + \" \" + player2 + \":\", score2)\r\n print(\"\\t------------------\")\r\n print()\r\n\r\n\r\n# Function that is where most of the game takes place. Function calls other functions such as make_move_and_update, choose_who_first, score and other code that make up the game.\r\n# Function keeps track of the player order, the board, unavailable moves, amount of rounds and other variables. The game ends in a draw when count reaches 9. At the end of the round, it asks the users if they want to play again.\r\n\r\ndef play_game(score1, score2, player1, player2):\r\n unavailable_moves_p1 = []\r\n unavailable_moves_p2 = []\r\n player_order = []\r\n the_board = [0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n count = 0\r\n restart = \"\"\r\n\r\n turn = choose_who_first(player1, player2, player_order)\r\n\r\n input(\"Enter anything to start the round: \")\r\n\r\n for i in range(10):\r\n\r\n print_board(the_board)\r\n make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)\r\n count += 1\r\n\r\n if check_game(the_board, turn):\r\n if turn == player1:\r\n score1 += 1\r\n elif turn == player2:\r\n score2 += 1\r\n break\r\n\r\n if count == 9:\r\n print(\"No numbers added up to 15, it's a DRAW! \")\r\n break\r\n\r\n if turn == player1:\r\n turn = player2\r\n else:\r\n turn = player1\r\n\r\n input(\"\\nEnter anything to continue: \")\r\n score(score1, score2, player1, player2)\r\n\r\n# Asks if the players want to restart. If yes, it calls the play_game function. If no, it ends the game and congratulates the overall winner.\r\n\r\n while restart != \"yes\" or restart != \"y\" or restart != \"n\" or restart != \"no\":\r\n restart = input(\"Do want to play Again? (y/n) \").lower()\r\n if restart == \"y\" or restart == \"yes\":\r\n print(\"\\nLoading new round...\")\r\n play_game(score1, score2, player1, player2)\r\n elif restart == \"n\" or restart == \"no\":\r\n if score1 > score2:\r\n print(\"\\n\"+player1, \"is the overall winner! Congratulations!\")\r\n elif score2 > score1:\r\n print(\"\\n\"+player2, \"is the overall winner! Congratulations!\")\r\n elif score1 == score2:\r\n print(\"\\nBoth players have one the same amount of rounds. It's a draw! \")\r\n print(\"\\nThanks for playing! \")\r\n break\r\n else:\r\n print(\"\\nPlease enter YES or NO \")\r\n print()\r\n\r\n\r\n# This code manages the important things before the actual game starts such as the instructions, usernames, etc. 
Calls the play_game function.\r\n\r\nif __name__ == \"__main__\":\r\n intro()\r\n\r\n input(\"Enter anything to continue: \")\r\n\r\n print(\"\\nEnter usernames: \")\r\n name1 = input(\"\\nPlayer 1, Enter your name: \").title()\r\n name2 = input(\"\\nPlayer 2, Enter your name: \").title()\r\n\r\n p1_score = 0\r\n p2_score = 0\r\n play_game(p1_score, p2_score, name1, name2)\r\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
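The eight hard-coded sum checks in the record's check_game function enumerate exactly the rows, columns, and diagonals of the board. For illustration only, a compact equivalent of that check (assuming the same 9-element board list with 0 marking an empty cell; the names are illustrative, not from the source):

# Equivalent of check_game's eight branches: scan every line once.
LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
         (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
         (0, 4, 8), (2, 4, 6)]              # diagonals

def winning_line(board):
    """Return the first fully filled line summing to 15, or None."""
    for a, b, c in LINES:
        if 0 not in (board[a], board[b], board[c]) \
                and board[a] + board[b] + board[c] == 15:
            return a, b, c
    return None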
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from subprocess import call


def query_DB_satellites(outputpath="../data/", user="anonimo", passwd="secreto"):
    """
    Queries the MultiDark database to extract all satellites of
    Milky-Way-sized haloes (5.0E11 < Mvir < 6.0E11) in the Bolshoi box.
    The output is stored as an ASCII (CSV) file.
    """
    # define the output file
    outputfile = outputpath + "milky_way_satellites.csv"
    # Build the SQL query: select the Milky-Way-mass hosts first, then
    # every halo whose hostFlag points back at one of those hosts.
    query = "with milky_way_halos as (select * from Bolshoi..BDMW where snapnum=416 and Mvir > 5.0E11 and Mvir < 6.0E11 ) select sub.* from milky_way_halos mwh, Bolshoi..BDMW sub where sub.snapnum = 416 and sub.hostFlag = mwh.bdmId"
    # Build the wget command to query the database
    website = "http://wget.multidark.org/MyDB?action=doQuery&SQL="
    wget_options = " --content-disposition --cookies=on --keep-session-cookies --save-cookies=cookie.txt --load-cookies=cookie.txt --auth-no-challenge"
    wget_options = wget_options + " -O " + outputfile + " "
    wget_command = "wget --http-user=" + user + " --http-passwd=" + passwd + " " + wget_options
    command = wget_command + "\"" + website + query + "\""
    print ""
    print query
    print ""
    print command
    print ""
    # execute wget in the shell and return its exit code
    retcode = call(command, shell=True)
    return retcode


query_DB_satellites(user="x", passwd="x")
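For comparison, the same query can be issued without shelling out to wget. This is a minimal sketch, assuming the third-party requests package is available and that the MyDB endpoint accepts the same action/SQL parameters as above; the function name is illustrative and the cookie handling of the wget call is omitted:

import requests

def query_db_satellites_requests(outputpath="../data/", user="anonimo", passwd="secreto"):
    # Same endpoint and SQL as the wget version; requests URL-encodes the
    # parameters and performs HTTP basic auth (assumed to match the service).
    sql = ("with milky_way_halos as (select * from Bolshoi..BDMW "
           "where snapnum=416 and Mvir > 5.0E11 and Mvir < 6.0E11) "
           "select sub.* from milky_way_halos mwh, Bolshoi..BDMW sub "
           "where sub.snapnum = 416 and sub.hostFlag = mwh.bdmId")
    response = requests.get("http://wget.multidark.org/MyDB",
                            params={"action": "doQuery", "SQL": sql},
                            auth=(user, passwd))
    response.raise_for_status()  # fail loudly on HTTP errors
    with open(outputpath + "milky_way_satellites.csv", "w") as f:
        f.write(response.text)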
|
normal
|
{
"blob_id": "f1021bfbf11886a01a84033b880d648c3286856b",
"index": 4311,
"step-1": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\nfrom subprocess import call\n\n\ndef query_DB_satellites(outputpath=\"../data/\", user=\"anonimo\", passwd=\"secreto\"):\n \"\"\"\n Queries the multidark database to extract all the haloes in the box within a ID range.\n\n The output is stored as an ascii (CSV) file.\n \"\"\"\n #define the output file\n outputfile=outputpath+\"milky_way_satellites.csv\"\n # Build the SQL query\n \n query = \"with milky_way_halos as (select * from Bolshoi..BDMW where snapnum=416 and Mvir > 5.0E11 and Mvir < 6.0E11 ) select sub.* from milky_way_halos mwh, Bolshoi..BDMW sub where sub.snapnum = 416 and sub.hostFlag = mwh.bdmId\"\n\n # Build the wget command to query the database\n website = \"http://wget.multidark.org/MyDB?action=doQuery&SQL=\"\n username = user\n password = passwd\n \n wget_options=\" --content-disposition --cookies=on --keep-session-cookies --save-cookies=cookie.txt --load-cookies=cookie.txt --auth-no-challenge\" \n wget_options=wget_options+\" -O \"+outputfile +\" \"\n wget_command=\"wget --http-user=\"+username+\" --http-passwd=\"+password+\" \"+wget_options \n command=wget_command + \"\\\"\"+ website + query+\"\\\"\"\n print \"\"\n print query\n print \"\"\n print command\n print \"\"\n # execute wget in shell\n retcode = call(command,shell=True)\n\nquery_DB_satellites(user=\"x\", passwd=\"x\")",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Helper:
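    """Builds and serves the bot's help text and per-command usage blurbs."""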
def __init__(self):
self.commands = ["help",
"lottery",
"poll",
"polling",
"prophecy",
"roll",
"team",
"ub"]
self.commands_full = ["help [command]",
"lottery [seconds]",
"poll",
"polling [poll number]",
"prophecy",
"roll [number]",
"team [type]",
"ub [role]"]
self.command_dict = {}
self.init_command_info()
def display_help(self):
result = "Help has been requested?\n\n" \
"__**Syntax to Summon Me**__\n" \
"Arise! [command] [options (optional)]\n\n" \
"__**Available Commands**__\n"
for com in self.commands_full:
result += f"{com}\n"
result += "\nIf you want more info on a specific command, " \
"use the command \"help\" followed by a command of your choice. " \
"**For example: Arise! help roll**" \
"\nI'm sure Azir will be glad to help you out... I love him so much..."
return result
def display_command(self, command):
if command not in self.commands:
return "That command doesn't exist :/"
result = f"__**Command: {command[0].upper()}{command[1:]}**__\n\n"
result += self.command_dict[command]
return result
def init_command_info(self):
self.command_dict["help"] = "Did somebody say recursion?"
self.command_dict["lottery"] = "**Syntax:** Arise! lottery [seconds]\n\n" \
"__**Description**__\n" \
"Azir's lottery selects one lucky winner from a pool. To enter the pool, " \
"react to the lottery message with ***any*** emoji. I do not discriminate. " \
"The default timer is **60 seconds**. Upon request, a different number of " \
"seconds may be allowed."
self.command_dict["poll"] = "**Syntax:** Arise! poll\n\n" \
"__**Description**__\n" \
"You have questions and I'll help you set them up. Follow the step-by-step " \
"instructions. When you have finished them all, use the polling command to " \
"ask away."
self.command_dict["polling"] = "**Syntax:** Arise! polling [poll number]\n\n" \
"__**Description**__\n" \
"This command allows you to use the poll you've created. If you have multiple " \
"polls, you may enter a number to specify which poll. The default is the first."
self.command_dict["prophecy"] = "Prepare yourself."
self.command_dict["roll"] = "**Syntax:** Arise! roll [number]\n\n" \
"__**Description**__\n" \
"Azir needs random numbers *all* the time so he thought you may need some too. " \
"This command produces a random number from 1 to the default value of **10**. " \
"If you want to roll up to a different number, let me know."
self.command_dict["team"] = "**Syntax:** Arise! team [type]\n\n" \
"__**Description**__\n" \
"Do you want to play a team with a theme? The Shuriman Empire has just the " \
"solution for you! With 25 different groupings (wow. Wow! WOW!!), you'll be " \
"having fun forever :) The default value for [type] is **0** in which you'd " \
"get any random team. To select a team based on location, use **1**. To select " \
"a *funner* team, use **2**."
self.command_dict["ub"] = "**Syntax:** Arise! ub [role]\n\n" \
"__**Description**__\n" \
"Oh, how I love Ultimate Bravery. No one is as good at this game mode as Azir. " \
"**NO ONE!**... Ahem... So basically, you are given a random champion and a build. " \
"Here are the general guidelines:\n\n" \
"1. Don't play this alone. Azir forbids it.\n" \
"2. No rerolling if the champion or build is undesirable.\n" \
"3. Okay, rerolling is allowed is the majority of the group agrees.\n" \
"4. Feel free to use any rune page. Choose wisely.\n" \
"5.a) Build the items in the order that they've been delivered.\n" \
"5.b) The first two items are interchangeable.\n" \
"6. Try your best to win. That's the whole point of this game.\n\n" \
"The default value for [role] is **1**. To select a jungle specific build, " \
"use **2**. To select a support specific build, use **3**."
|
normal
|
{
"blob_id": "fdf76ff20260c25d95a9bf751fa78156071a7825",
"index": 7487,
"step-1": "class Helper:\n <mask token>\n <mask token>\n\n def display_command(self, command):\n if command not in self.commands:\n return \"That command doesn't exist :/\"\n result = f'__**Command: {command[0].upper()}{command[1:]}**__\\n\\n'\n result += self.command_dict[command]\n return result\n <mask token>\n",
"step-2": "class Helper:\n\n def __init__(self):\n self.commands = ['help', 'lottery', 'poll', 'polling', 'prophecy',\n 'roll', 'team', 'ub']\n self.commands_full = ['help [command]', 'lottery [seconds]', 'poll',\n 'polling [poll number]', 'prophecy', 'roll [number]',\n 'team [type]', 'ub [role]']\n self.command_dict = {}\n self.init_command_info()\n <mask token>\n\n def display_command(self, command):\n if command not in self.commands:\n return \"That command doesn't exist :/\"\n result = f'__**Command: {command[0].upper()}{command[1:]}**__\\n\\n'\n result += self.command_dict[command]\n return result\n <mask token>\n",
"step-3": "class Helper:\n\n def __init__(self):\n self.commands = ['help', 'lottery', 'poll', 'polling', 'prophecy',\n 'roll', 'team', 'ub']\n self.commands_full = ['help [command]', 'lottery [seconds]', 'poll',\n 'polling [poll number]', 'prophecy', 'roll [number]',\n 'team [type]', 'ub [role]']\n self.command_dict = {}\n self.init_command_info()\n <mask token>\n\n def display_command(self, command):\n if command not in self.commands:\n return \"That command doesn't exist :/\"\n result = f'__**Command: {command[0].upper()}{command[1:]}**__\\n\\n'\n result += self.command_dict[command]\n return result\n\n def init_command_info(self):\n self.command_dict['help'] = 'Did somebody say recursion?'\n self.command_dict['lottery'] = \"\"\"**Syntax:** Arise! lottery [seconds]\n\n__**Description**__\nAzir's lottery selects one lucky winner from a pool. To enter the pool, react to the lottery message with ***any*** emoji. I do not discriminate. The default timer is **60 seconds**. Upon request, a different number of seconds may be allowed.\"\"\"\n self.command_dict['poll'] = \"\"\"**Syntax:** Arise! poll\n\n__**Description**__\nYou have questions and I'll help you set them up. Follow the step-by-step instructions. When you have finished them all, use the polling command to ask away.\"\"\"\n self.command_dict['polling'] = \"\"\"**Syntax:** Arise! polling [poll number]\n\n__**Description**__\nThis command allows you to use the poll you've created. If you have multiple polls, you may enter a number to specify which poll. The default is the first.\"\"\"\n self.command_dict['prophecy'] = 'Prepare yourself.'\n self.command_dict['roll'] = \"\"\"**Syntax:** Arise! roll [number]\n\n__**Description**__\nAzir needs random numbers *all* the time so he thought you may need some too. This command produces a random number from 1 to the default value of **10**. If you want to roll up to a different number, let me know.\"\"\"\n self.command_dict['team'] = \"\"\"**Syntax:** Arise! team [type]\n\n__**Description**__\nDo you want to play a team with a theme? The Shuriman Empire has just the solution for you! With 25 different groupings (wow. Wow! WOW!!), you'll be having fun forever :) The default value for [type] is **0** in which you'd get any random team. To select a team based on location, use **1**. To select a *funner* team, use **2**.\"\"\"\n self.command_dict['ub'] = \"\"\"**Syntax:** Arise! ub [role]\n\n__**Description**__\nOh, how I love Ultimate Bravery. No one is as good at this game mode as Azir. **NO ONE!**... Ahem... So basically, you are given a random champion and a build. Here are the general guidelines:\n\n1. Don't play this alone. Azir forbids it.\n2. No rerolling if the champion or build is undesirable.\n3. Okay, rerolling is allowed is the majority of the group agrees.\n4. Feel free to use any rune page. Choose wisely.\n5.a) Build the items in the order that they've been delivered.\n5.b) The first two items are interchangeable.\n6. Try your best to win. That's the whole point of this game.\n\nThe default value for [role] is **1**. To select a jungle specific build, use **2**. To select a support specific build, use **3**.\"\"\"\n",
"step-4": "class Helper:\n\n def __init__(self):\n self.commands = ['help', 'lottery', 'poll', 'polling', 'prophecy',\n 'roll', 'team', 'ub']\n self.commands_full = ['help [command]', 'lottery [seconds]', 'poll',\n 'polling [poll number]', 'prophecy', 'roll [number]',\n 'team [type]', 'ub [role]']\n self.command_dict = {}\n self.init_command_info()\n\n def display_help(self):\n result = \"\"\"Help has been requested?\n\n__**Syntax to Summon Me**__\nArise! [command] [options (optional)]\n\n__**Available Commands**__\n\"\"\"\n for com in self.commands_full:\n result += f'{com}\\n'\n result += \"\"\"\nIf you want more info on a specific command, use the command \"help\" followed by a command of your choice. **For example: Arise! help roll**\nI'm sure Azir will be glad to help you out... I love him so much...\"\"\"\n return result\n\n def display_command(self, command):\n if command not in self.commands:\n return \"That command doesn't exist :/\"\n result = f'__**Command: {command[0].upper()}{command[1:]}**__\\n\\n'\n result += self.command_dict[command]\n return result\n\n def init_command_info(self):\n self.command_dict['help'] = 'Did somebody say recursion?'\n self.command_dict['lottery'] = \"\"\"**Syntax:** Arise! lottery [seconds]\n\n__**Description**__\nAzir's lottery selects one lucky winner from a pool. To enter the pool, react to the lottery message with ***any*** emoji. I do not discriminate. The default timer is **60 seconds**. Upon request, a different number of seconds may be allowed.\"\"\"\n self.command_dict['poll'] = \"\"\"**Syntax:** Arise! poll\n\n__**Description**__\nYou have questions and I'll help you set them up. Follow the step-by-step instructions. When you have finished them all, use the polling command to ask away.\"\"\"\n self.command_dict['polling'] = \"\"\"**Syntax:** Arise! polling [poll number]\n\n__**Description**__\nThis command allows you to use the poll you've created. If you have multiple polls, you may enter a number to specify which poll. The default is the first.\"\"\"\n self.command_dict['prophecy'] = 'Prepare yourself.'\n self.command_dict['roll'] = \"\"\"**Syntax:** Arise! roll [number]\n\n__**Description**__\nAzir needs random numbers *all* the time so he thought you may need some too. This command produces a random number from 1 to the default value of **10**. If you want to roll up to a different number, let me know.\"\"\"\n self.command_dict['team'] = \"\"\"**Syntax:** Arise! team [type]\n\n__**Description**__\nDo you want to play a team with a theme? The Shuriman Empire has just the solution for you! With 25 different groupings (wow. Wow! WOW!!), you'll be having fun forever :) The default value for [type] is **0** in which you'd get any random team. To select a team based on location, use **1**. To select a *funner* team, use **2**.\"\"\"\n self.command_dict['ub'] = \"\"\"**Syntax:** Arise! ub [role]\n\n__**Description**__\nOh, how I love Ultimate Bravery. No one is as good at this game mode as Azir. **NO ONE!**... Ahem... So basically, you are given a random champion and a build. Here are the general guidelines:\n\n1. Don't play this alone. Azir forbids it.\n2. No rerolling if the champion or build is undesirable.\n3. Okay, rerolling is allowed is the majority of the group agrees.\n4. Feel free to use any rune page. Choose wisely.\n5.a) Build the items in the order that they've been delivered.\n5.b) The first two items are interchangeable.\n6. Try your best to win. That's the whole point of this game.\n\nThe default value for [role] is **1**. 
To select a jungle specific build, use **2**. To select a support specific build, use **3**.\"\"\"\n",
"step-5": "class Helper:\r\n def __init__(self):\r\n self.commands = [\"help\",\r\n \"lottery\",\r\n \"poll\",\r\n \"polling\",\r\n \"prophecy\",\r\n \"roll\",\r\n \"team\",\r\n \"ub\"]\r\n self.commands_full = [\"help [command]\",\r\n \"lottery [seconds]\",\r\n \"poll\",\r\n \"polling [poll number]\",\r\n \"prophecy\",\r\n \"roll [number]\",\r\n \"team [type]\",\r\n \"ub [role]\"]\r\n self.command_dict = {}\r\n self.init_command_info()\r\n\r\n # noinspection PyMethodMayBeStatic\r\n def display_help(self):\r\n result = \"Help has been requested?\\n\\n\" \\\r\n \"__**Syntax to Summon Me**__\\n\" \\\r\n \"Arise! [command] [options (optional)]\\n\\n\" \\\r\n \"__**Available Commands**__\\n\"\r\n for com in self.commands_full:\r\n result += f\"{com}\\n\"\r\n\r\n result += \"\\nIf you want more info on a specific command, \" \\\r\n \"use the command \\\"help\\\" followed by a command of your choice. \" \\\r\n \"**For example: Arise! help roll**\" \\\r\n \"\\nI'm sure Azir will be glad to help you out... I love him so much...\"\r\n\r\n return result\r\n\r\n # noinspection PyMethodMayBeStatic\r\n def display_command(self, command):\r\n if command not in self.commands:\r\n return \"That command doesn't exist :/\"\r\n result = f\"__**Command: {command[0].upper()}{command[1:]}**__\\n\\n\"\r\n result += self.command_dict[command]\r\n return result\r\n\r\n # noinspection PyMethodMayBeStatic\r\n def init_command_info(self):\r\n self.command_dict[\"help\"] = \"Did somebody say recursion?\"\r\n self.command_dict[\"lottery\"] = \"**Syntax:** Arise! lottery [seconds]\\n\\n\" \\\r\n \"__**Description**__\\n\" \\\r\n \"Azir's lottery selects one lucky winner from a pool. To enter the pool, \" \\\r\n \"react to the lottery message with ***any*** emoji. I do not discriminate. \" \\\r\n \"The default timer is **60 seconds**. Upon request, a different number of \" \\\r\n \"seconds may be allowed.\"\r\n self.command_dict[\"poll\"] = \"**Syntax:** Arise! poll\\n\\n\" \\\r\n \"__**Description**__\\n\" \\\r\n \"You have questions and I'll help you set them up. Follow the step-by-step \" \\\r\n \"instructions. When you have finished them all, use the polling command to \" \\\r\n \"ask away.\"\r\n self.command_dict[\"polling\"] = \"**Syntax:** Arise! polling [poll number]\\n\\n\" \\\r\n \"__**Description**__\\n\" \\\r\n \"This command allows you to use the poll you've created. If you have multiple \" \\\r\n \"polls, you may enter a number to specify which poll. The default is the first.\"\r\n self.command_dict[\"prophecy\"] = \"Prepare yourself.\"\r\n self.command_dict[\"roll\"] = \"**Syntax:** Arise! roll [number]\\n\\n\" \\\r\n \"__**Description**__\\n\" \\\r\n \"Azir needs random numbers *all* the time so he thought you may need some too. \" \\\r\n \"This command produces a random number from 1 to the default value of **10**. \" \\\r\n \"If you want to roll up to a different number, let me know.\"\r\n self.command_dict[\"team\"] = \"**Syntax:** Arise! team [type]\\n\\n\" \\\r\n \"__**Description**__\\n\" \\\r\n \"Do you want to play a team with a theme? The Shuriman Empire has just the \" \\\r\n \"solution for you! With 25 different groupings (wow. Wow! WOW!!), you'll be \" \\\r\n \"having fun forever :) The default value for [type] is **0** in which you'd \" \\\r\n \"get any random team. To select a team based on location, use **1**. To select \" \\\r\n \"a *funner* team, use **2**.\"\r\n self.command_dict[\"ub\"] = \"**Syntax:** Arise! 
ub [role]\\n\\n\" \\\r\n \"__**Description**__\\n\" \\\r\n \"Oh, how I love Ultimate Bravery. No one is as good at this game mode as Azir. \" \\\r\n \"**NO ONE!**... Ahem... So basically, you are given a random champion and a build. \" \\\r\n \"Here are the general guidelines:\\n\\n\" \\\r\n \"1. Don't play this alone. Azir forbids it.\\n\" \\\r\n \"2. No rerolling if the champion or build is undesirable.\\n\" \\\r\n \"3. Okay, rerolling is allowed is the majority of the group agrees.\\n\" \\\r\n \"4. Feel free to use any rune page. Choose wisely.\\n\" \\\r\n \"5.a) Build the items in the order that they've been delivered.\\n\" \\\r\n \"5.b) The first two items are interchangeable.\\n\" \\\r\n \"6. Try your best to win. That's the whole point of this game.\\n\\n\" \\\r\n \"The default value for [role] is **1**. To select a jungle specific build, \" \\\r\n \"use **2**. To select a support specific build, use **3**.\"\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import torch
import re
import sys
import os
import shutil
import filecmp
import numpy as np
from collections import defaultdict
from shutil import copyfile
sys.path.append('../')
class BoardParser:
def __init__(self):
self.file = open('../board_output', 'rb')
self.data = None
def update(self):
s = self.file.read()
if len(s) == 200:
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes
            self.data = np.frombuffer(s, dtype=np.int8).reshape(20, 10)
self.file.seek(0)
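# A minimal usage sketch for BoardParser, assuming the game process keeps
# '../board_output' filled with 20x10 int8 frames (the 200-byte check above):
def _demo_board_parser():
    parser = BoardParser()
    parser.update()
    if parser.data is not None:
        print(parser.data)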
class StatusParser:
def __init__(self):
self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10))
self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1, ))
self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1, ))
self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1, ))
self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np.int32, shape=(4, ))
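# StatusParser only exposes read-only shared-memory views; a hedged polling
# example (assumes the '../tmp/*' memmap files already exist):
def _demo_status_parser():
    status = StatusParser()
    print('score: %d, lines: %d, combo: %d' % (status.score[0], status.lines[0], status.combo[0]))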
class Parser:
def __init__(self, filename):
self.filename = filename
self.last_update = -1
def check_update(self):
latest_update = os.path.getmtime(self.filename)
if latest_update > self.last_update:
self.last_update = latest_update
self.parse()
return True
return False
def parse(self):
        score_re = r'Episode:\s*(?P<episode>\d*)\s*' \
                   r'Score:\s*(?P<score>\d*)\s*' \
                   r'Lines Cleared:\s*(?P<lines>\d*)'
        train_re = r'Iteration:\s*(?P<iter>\d*)\s*' \
                   r'training loss:\s*(?P<t_loss>\d*\.\d*)\s*' \
                   r'validation loss:\s*(?P<v_loss>\d*\.\d*)±\s*(?P<v_loss_err>\d*\.\d*|nan)\s*' \
                   r'gradient norm:\s*(?P<g_norm>\d*\.\d*)'
        datasize_re = r'Training data size:\s*(?P<tsize>\d*)\s*' \
                      r'Validation data size:\s*(?P<vsize>\d*)'
        queue_re = r'Memory usage: (?P<filled>\d*) / (?P<size>\d*).*'
self.data = defaultdict(list)
size = 0
filled = 0
rm_since_last_game = 0
with open(self.filename) as f:
lc_avg_tmp = []
sc_avg_tmp = []
data_accum = 0
training = False
for line in f.readlines():
match_score_re = re.search(score_re, line)
match_train_re = re.search(train_re, line)
match_datasize_re = re.search(datasize_re, line)
match_queue_re = re.search(queue_re, line)
if match_score_re:
d = match_score_re.groupdict()
lc = int(d['lines'])
sc = int(d['score'])
self.data['line_cleared'].append(lc)
self.data['score'].append(sc)
self.data['data_accumulated'].append(data_accum)
lc_avg_tmp.append(lc)
sc_avg_tmp.append(sc)
rm_since_last_game = 0
elif match_train_re:
d = match_train_re.groupdict()
self.data['training_loss'].append(float(d['t_loss']))
self.data['validation_loss'].append(float(d['v_loss']))
if d['v_loss_err'] == 'nan':
self.data['validation_loss_err'].append(0)
else:
self.data['validation_loss_err'].append(float(d['v_loss_err']))
self.data['g_norm'].append(float(d['g_norm']))
#print(d['g_norm'])
elif match_datasize_re:
d = match_datasize_re.groupdict()
tsize = int(d['tsize'])
vsize = int(d['vsize'])
data_accum += (tsize + vsize)
elif match_queue_re:
d = match_queue_re.groupdict()
filled = int(d['filled'])
size = int(d['size'])
elif 'REMOVING UNUSED' in line:
rm_since_last_game += 1
elif 'proceed to training' in line:
training = True
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
lc_avg_tmp.clear()
else:
if self.data['line_cleared_per_train']:
self.data['line_cleared_per_train'].append(
self.data['line_cleared_per_train'][-1])
else:
self.data['line_cleared_per_train'].append((0, 0))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
sc_avg_tmp.clear()
else:
if self.data['score_per_train']:
self.data['score_per_train'].append(
self.data['score_per_train'][-1])
else:
self.data['score_per_train'].append((0, 0))
elif 'Training complete' in line:
training = False
if lc_avg_tmp:
mean = np.average(lc_avg_tmp)
std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))
self.data['line_cleared_per_train'].append((mean, std))
if sc_avg_tmp:
mean = np.average(sc_avg_tmp)
std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))
self.data['score_per_train'].append((mean, std))
if not training:
flocal = './model_checkpoint'
ftarget = '../pytorch_model/model_checkpoint'
ex_local = os.path.isfile(flocal)
ex_target = os.path.isfile(ftarget)
if ex_target and ((ex_local and not filecmp.cmp(flocal, ftarget)) or not ex_local):
copyfile(ftarget, flocal)
self.data['filled'] = filled
self.data['size'] = size
self.data['rm_since_last_game'] = rm_since_last_game
class ModelParser:
def __init__(self, distributional=True):
self.last_update = -1
self.data = {}
self.distributional = distributional
def check_update(self):
flocal = './model_checkpoint'
if os.path.isfile(flocal):
latest = os.path.getmtime(flocal)
if latest > self.last_update:
print('New model found, updating...', flush=True)
self.last_update = latest
state = torch.load(flocal, map_location=torch.device('cpu'))
model_state = state['model_state_dict']
self.parse_state(model_state)
return True
return False
def parse(self, model):
self.parse_state(model.state_dict())
def parse_state(self, model_state):
self.data = {}
for k, v in model_state.items():
if 'weight' in k:
k = k.replace('.weight', '')
k = k.replace('seq.', '')
self.data[k] = v.cpu().numpy().ravel()
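# A small driver sketch tying the classes together; '../log' is an assumed
# location for the training log, not something this file defines:
if __name__ == '__main__':
    log_path = '../log'  # hypothetical log file location
    model_parser = ModelParser()
    if os.path.isfile(log_path):
        log_parser = Parser(log_path)
        if log_parser.check_update():
            print('episodes parsed:', len(log_parser.data['score']))
    if model_parser.check_update():
        print('weight tensors parsed:', list(model_parser.data.keys()))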
|
normal
|
{
"blob_id": "3668e8009dca4ea261bdfbd325331c338fdac5a9",
"index": 627,
"step-1": "<mask token>\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 
0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n",
"step-2": "<mask token>\n\n\nclass BoardParser:\n <mask token>\n <mask token>\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 
'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n",
"step-3": "<mask token>\n\n\nclass BoardParser:\n <mask token>\n\n def update(self):\n s = self.file.read()\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n self.file.seek(0)\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, 
std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n",
"step-4": "<mask token>\nsys.path.append('../')\n\n\nclass BoardParser:\n\n def __init__(self):\n self.file = open('../board_output', 'rb')\n self.data = None\n\n def update(self):\n s = self.file.read()\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n self.file.seek(0)\n\n\nclass StatusParser:\n\n def __init__(self):\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8,\n shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32,\n shape=(1,))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32,\n shape=(1,))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32,\n shape=(1,))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np\n .int32, shape=(4,))\n\n\nclass Parser:\n\n def __init__(self, filename):\n self.filename = filename\n self.last_update = -1\n\n def check_update(self):\n latest_update = os.path.getmtime(self.filename)\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = (\n 'Episode:\\\\s*(?P<episode>\\\\d*)\\\\s*Score:\\\\s*(?P<score>\\\\d*)\\\\s*Lines Cleared:\\\\s*(?P<lines>\\\\d*)'\n )\n train_re = (\n 'Iteration:\\\\s*(?P<iter>\\\\d*)\\\\s*training loss:\\\\s*(?P<t_loss>\\\\d*\\\\.\\\\d*)\\\\s*validation loss:\\\\s*(?P<v_loss>\\\\d*\\\\.\\\\d*)±\\\\s*(?P<v_loss_err>\\\\d*\\\\.\\\\d*|nan)\\\\s*gradient norm:\\\\s*(?P<g_norm>\\\\d*\\\\.\\\\d*)'\n )\n datasize_re = (\n 'Training data size:\\\\s*(?P<tsize>\\\\d*)\\\\s*Validation data size:\\\\s*(?P<vsize>\\\\d*)'\n )\n queue_re = 'Memory usage: (?P<filled>\\\\d*) / (?P<size>\\\\d*).*'\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d[\n 'v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += tsize + vsize\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n elif self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(self.\n data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = 
np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n elif self.data['score_per_train']:\n self.data['score_per_train'].append(self.data[\n 'score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n if ex_target and (ex_local and not filecmp.cmp(flocal,\n ftarget) or not ex_local):\n copyfile(ftarget, flocal)\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n\n def __init__(self, distributional=True):\n self.last_update = -1\n self.data = {}\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n",
"step-5": "import torch\nimport re\nimport sys\nimport os\nimport shutil\nimport filecmp\nimport numpy as np\nfrom collections import defaultdict\nfrom shutil import copyfile\n\nsys.path.append('../')\n\n\nclass BoardParser:\n def __init__(self):\n\n self.file = open('../board_output', 'rb')\n\n self.data = None\n\n def update(self):\n\n s = self.file.read()\n\n if len(s) == 200:\n self.data = np.fromstring(s, dtype=np.int8).reshape(20, 10)\n\n self.file.seek(0)\n\n\nclass StatusParser:\n def __init__(self):\n\n self.board = np.memmap('../tmp/board', mode='r', dtype=np.int8, shape=(20, 10))\n self.combo = np.memmap('../tmp/combo', mode='r', dtype=np.int32, shape=(1, ))\n self.lines = np.memmap('../tmp/lines', mode='r', dtype=np.int32, shape=(1, ))\n self.score = np.memmap('../tmp/score', mode='r', dtype=np.int32, shape=(1, ))\n self.line_stats = np.memmap('../tmp/line_stats', mode='r', dtype=np.int32, shape=(4, ))\n\n\nclass Parser:\n def __init__(self, filename):\n\n self.filename = filename\n\n self.last_update = -1\n\n def check_update(self):\n\n latest_update = os.path.getmtime(self.filename)\n\n if latest_update > self.last_update:\n self.last_update = latest_update\n self.parse()\n return True\n return False\n\n def parse(self):\n score_re = 'Episode:\\s*(?P<episode>\\d*)\\s*' \\\n 'Score:\\s*(?P<score>\\d*)\\s*' \\\n 'Lines Cleared:\\s*(?P<lines>\\d*)'\n train_re = 'Iteration:\\s*(?P<iter>\\d*)\\s*' \\\n 'training loss:\\s*(?P<t_loss>\\d*\\.\\d*)\\s*' \\\n 'validation loss:\\s*(?P<v_loss>\\d*\\.\\d*)±\\s*(?P<v_loss_err>\\d*\\.\\d*|nan)\\s*' \\\n 'gradient norm:\\s*(?P<g_norm>\\d*\\.\\d*)'\n datasize_re = 'Training data size:\\s*(?P<tsize>\\d*)\\s*' \\\n 'Validation data size:\\s*(?P<vsize>\\d*)'\n queue_re = 'Memory usage: (?P<filled>\\d*) / (?P<size>\\d*).*'\n\n self.data = defaultdict(list)\n size = 0\n filled = 0\n rm_since_last_game = 0\n\n with open(self.filename) as f:\n lc_avg_tmp = []\n sc_avg_tmp = []\n data_accum = 0\n training = False\n for line in f.readlines():\n match_score_re = re.search(score_re, line)\n match_train_re = re.search(train_re, line)\n match_datasize_re = re.search(datasize_re, line)\n match_queue_re = re.search(queue_re, line)\n if match_score_re:\n d = match_score_re.groupdict()\n lc = int(d['lines'])\n sc = int(d['score'])\n self.data['line_cleared'].append(lc)\n self.data['score'].append(sc)\n self.data['data_accumulated'].append(data_accum)\n lc_avg_tmp.append(lc)\n sc_avg_tmp.append(sc)\n rm_since_last_game = 0\n elif match_train_re:\n d = match_train_re.groupdict()\n self.data['training_loss'].append(float(d['t_loss']))\n self.data['validation_loss'].append(float(d['v_loss']))\n if d['v_loss_err'] == 'nan':\n self.data['validation_loss_err'].append(0)\n else:\n self.data['validation_loss_err'].append(float(d['v_loss_err']))\n self.data['g_norm'].append(float(d['g_norm']))\n #print(d['g_norm'])\n elif match_datasize_re:\n d = match_datasize_re.groupdict()\n tsize = int(d['tsize'])\n vsize = int(d['vsize'])\n data_accum += (tsize + vsize)\n elif match_queue_re:\n d = match_queue_re.groupdict()\n filled = int(d['filled'])\n size = int(d['size'])\n elif 'REMOVING UNUSED' in line:\n rm_since_last_game += 1\n elif 'proceed to training' in line:\n training = True\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n lc_avg_tmp.clear()\n else:\n if self.data['line_cleared_per_train']:\n self.data['line_cleared_per_train'].append(\n 
self.data['line_cleared_per_train'][-1])\n else:\n self.data['line_cleared_per_train'].append((0, 0))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n sc_avg_tmp.clear()\n else:\n if self.data['score_per_train']:\n self.data['score_per_train'].append(\n self.data['score_per_train'][-1])\n else:\n self.data['score_per_train'].append((0, 0))\n elif 'Training complete' in line:\n training = False\n if lc_avg_tmp:\n mean = np.average(lc_avg_tmp)\n std = np.std(lc_avg_tmp) / np.sqrt(len(lc_avg_tmp))\n self.data['line_cleared_per_train'].append((mean, std))\n if sc_avg_tmp:\n mean = np.average(sc_avg_tmp)\n std = np.std(sc_avg_tmp) / np.sqrt(len(sc_avg_tmp))\n self.data['score_per_train'].append((mean, std))\n\n if not training:\n flocal = './model_checkpoint'\n ftarget = '../pytorch_model/model_checkpoint'\n\n ex_local = os.path.isfile(flocal)\n ex_target = os.path.isfile(ftarget)\n\n if ex_target and ((ex_local and not filecmp.cmp(flocal, ftarget)) or not ex_local):\n copyfile(ftarget, flocal)\n\n self.data['filled'] = filled\n self.data['size'] = size\n self.data['rm_since_last_game'] = rm_since_last_game\n\n\nclass ModelParser:\n def __init__(self, distributional=True):\n\n self.last_update = -1\n\n self.data = {}\n\n self.distributional = distributional\n\n def check_update(self):\n flocal = './model_checkpoint'\n if os.path.isfile(flocal):\n latest = os.path.getmtime(flocal)\n if latest > self.last_update:\n print('New model found, updating...', flush=True)\n self.last_update = latest\n state = torch.load(flocal, map_location=torch.device('cpu'))\n model_state = state['model_state_dict']\n self.parse_state(model_state)\n return True\n return False\n\n def parse(self, model):\n self.parse_state(model.state_dict())\n\n def parse_state(self, model_state):\n self.data = {}\n for k, v in model_state.items():\n if 'weight' in k:\n k = k.replace('.weight', '')\n k = k.replace('seq.', '')\n self.data[k] = v.cpu().numpy().ravel()\n",
"step-ids": [
11,
12,
13,
15,
17
]
}
|
[
11,
12,
13,
15,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def measure_time_of_func(func):
def wrapper_func(n):
start_time = time.time()
fib_seq = func(n)
end_time = time.time()
return fib_seq, end_time - start_time
return wrapper_func
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
<|reserved_special_token_1|>
import time
def measure_time_of_func(func):
def wrapper_func(n):
start_time = time.time()
fib_seq = func(n)
end_time = time.time()
return fib_seq, end_time - start_time
return wrapper_func
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
<|reserved_special_token_1|>
import time
# Decorator: wraps func so the call returns (result, elapsed_seconds).
def measure_time_of_func(func):
def wrapper_func(n):
start_time = time.time()
fib_seq = func(n)
end_time = time.time()
return (fib_seq, end_time - start_time)
return wrapper_func
# Returns a list with the first n numbers of the Fibonacci sequence.
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
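# Because of the decorator, fib returns a (sequence, elapsed_seconds) tuple
# rather than a bare list:
if __name__ == '__main__':
    seq, elapsed = fib(10)
    print(seq)      # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    print(elapsed)  # wall-clock seconds spent inside fib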
|
flexible
|
{
"blob_id": "2c39660da8fe839c4634cd73ce069acc7b1b29b4",
"index": 51,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-3": "<mask token>\n\n\ndef measure_time_of_func(func):\n\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return fib_seq, end_time - start_time\n return wrapper_func\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-4": "import time\n\n\ndef measure_time_of_func(func):\n\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return fib_seq, end_time - start_time\n return wrapper_func\n\n\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-5": "import time\n\n\n# Decorator\ndef measure_time_of_func(func):\n def wrapper_func(n):\n start_time = time.time()\n fib_seq = func(n)\n end_time = time.time()\n return (fib_seq, end_time - start_time)\n\n return wrapper_func\n\n\n# Returns a list with first n numbers of fibonacci sequence.\n@measure_time_of_func\ndef fib(n):\n sequence = [1, 1]\n for i in range(2, n, 1):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n return sequence\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(N):
x[i], y[i], z[i] = (int(x) for x in input().split())
<|reserved_special_token_0|>
for sx in (-1, 1):
for sy in (-1, 1):
for sz in (-1, 1):
_x, _y, _z = sx * x, sy * y, sz * z
T = np.sort(_x + _y + _z)[::-1][:M].sum()
temp.append(T)
print(max(temp))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
N, M = (int(x) for x in input().split())
x, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int)
for i in range(N):
x[i], y[i], z[i] = (int(x) for x in input().split())
temp = []
for sx in (-1, 1):
for sy in (-1, 1):
for sz in (-1, 1):
_x, _y, _z = sx * x, sy * y, sz * z
T = np.sort(_x + _y + _z)[::-1][:M].sum()
temp.append(T)
print(max(temp))
<|reserved_special_token_1|>
import numpy as np
N, M = (int(x) for x in input().split())
x, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int)
for i in range(N):
x[i], y[i], z[i] = (int(x) for x in input().split())
temp = []
for sx in (-1, 1):
for sy in (-1, 1):
for sz in (-1, 1):
_x, _y, _z = sx * x, sy * y, sz * z
T = np.sort(_x + _y + _z)[::-1][:M].sum()
temp.append(T)
print(max(temp))
|
flexible
|
{
"blob_id": "af40239551709eff02b8a1f034583ab80845d1d7",
"index": 1532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(N):\n x[i], y[i], z[i] = (int(x) for x in input().split())\n<mask token>\nfor sx in (-1, 1):\n for sy in (-1, 1):\n for sz in (-1, 1):\n _x, _y, _z = sx * x, sy * y, sz * z\n T = np.sort(_x + _y + _z)[::-1][:M].sum()\n temp.append(T)\nprint(max(temp))\n",
"step-3": "<mask token>\nN, M = (int(x) for x in input().split())\nx, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int\n )\nfor i in range(N):\n x[i], y[i], z[i] = (int(x) for x in input().split())\ntemp = []\nfor sx in (-1, 1):\n for sy in (-1, 1):\n for sz in (-1, 1):\n _x, _y, _z = sx * x, sy * y, sz * z\n T = np.sort(_x + _y + _z)[::-1][:M].sum()\n temp.append(T)\nprint(max(temp))\n",
"step-4": "import numpy as np\nN, M = (int(x) for x in input().split())\nx, y, z = np.zeros(N, dtype=int), np.zeros(N, dtype=int), np.zeros(N, dtype=int\n )\nfor i in range(N):\n x[i], y[i], z[i] = (int(x) for x in input().split())\ntemp = []\nfor sx in (-1, 1):\n for sy in (-1, 1):\n for sz in (-1, 1):\n _x, _y, _z = sx * x, sy * y, sz * z\n T = np.sort(_x + _y + _z)[::-1][:M].sum()\n temp.append(T)\nprint(max(temp))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.test import TestCase, Client
from django.contrib.auth.models import User
from blog.factories import BlogPostFactory, TagFactory
from blog.models import BlogPost
from faker import Factory
faker = Factory.create()
class ServicesTests(TestCase):
def setUp(self):
self.tag = TagFactory()
self.blog_post = BlogPostFactory()
self.client = Client()
self.user = User.objects.create_user(username=faker.name(), password='Ivoepanda')
def test_create_new_post_service_if_not_logged_user(self):
self.client.logout()
response = self.client.get('/create-new-post/')
self.assertEqual(302, response.status_code)
def test_create_new_post_service_if_logged_user(self):
self.client.login(username=self.user.username, password='Ivoepanda')
self.assertEqual(1, BlogPost.objects.get_private_posts().count())
response = self.client.post('/create-new-post/',
data={"title": faker.name(),
"content": faker.word(),
"author": self.user,
"tags": self.tag})
self.assertEqual(302, response.status_code)
self.assertEqual(2, BlogPost.objects.get_private_posts().count())
def tearDown(self):
self.client.logout()
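# The factories imported above live in blog/factories.py, which is not shown
# here; a hedged sketch of what they could look like with factory_boy (model
# and field names below are illustrative assumptions, not the project's code):
#
#     import factory
#     from blog.models import BlogPost, Tag
#
#     class TagFactory(factory.django.DjangoModelFactory):
#         class Meta:
#             model = Tag
#         name = factory.Faker('word')
#
#     class BlogPostFactory(factory.django.DjangoModelFactory):
#         class Meta:
#             model = BlogPost
#         title = factory.Faker('sentence')
#         content = factory.Faker('paragraph')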
|
normal
|
{
"blob_id": "c9d25460022bb86c821600dfaed17baa70531c9f",
"index": 7125,
"step-1": "<mask token>\n\n\nclass ServicesTests(TestCase):\n\n def setUp(self):\n self.tag = TagFactory()\n self.blog_post = BlogPostFactory()\n self.client = Client()\n self.user = User.objects.create_user(username=faker.name(),\n password='Ivoepanda')\n\n def test_create_new_post_service_if_not_logged_user(self):\n self.client.logout()\n response = self.client.get('/create-new-post/')\n self.assertEqual(302, response.status_code)\n <mask token>\n\n def tearDown(self):\n self.client.logout()\n",
"step-2": "<mask token>\n\n\nclass ServicesTests(TestCase):\n\n def setUp(self):\n self.tag = TagFactory()\n self.blog_post = BlogPostFactory()\n self.client = Client()\n self.user = User.objects.create_user(username=faker.name(),\n password='Ivoepanda')\n\n def test_create_new_post_service_if_not_logged_user(self):\n self.client.logout()\n response = self.client.get('/create-new-post/')\n self.assertEqual(302, response.status_code)\n\n def test_create_new_post_service_if_logged_user(self):\n self.client.login(username=self.user.username, password='Ivoepanda')\n self.assertEqual(1, BlogPost.objects.get_private_posts().count())\n response = self.client.post('/create-new-post/', data={'title':\n faker.name(), 'content': faker.word(), 'author': self.user,\n 'tags': self.tag})\n self.assertEqual(302, response.status_code)\n self.assertEqual(2, BlogPost.objects.get_private_posts().count())\n\n def tearDown(self):\n self.client.logout()\n",
"step-3": "<mask token>\nfaker = Factory.create()\n\n\nclass ServicesTests(TestCase):\n\n def setUp(self):\n self.tag = TagFactory()\n self.blog_post = BlogPostFactory()\n self.client = Client()\n self.user = User.objects.create_user(username=faker.name(),\n password='Ivoepanda')\n\n def test_create_new_post_service_if_not_logged_user(self):\n self.client.logout()\n response = self.client.get('/create-new-post/')\n self.assertEqual(302, response.status_code)\n\n def test_create_new_post_service_if_logged_user(self):\n self.client.login(username=self.user.username, password='Ivoepanda')\n self.assertEqual(1, BlogPost.objects.get_private_posts().count())\n response = self.client.post('/create-new-post/', data={'title':\n faker.name(), 'content': faker.word(), 'author': self.user,\n 'tags': self.tag})\n self.assertEqual(302, response.status_code)\n self.assertEqual(2, BlogPost.objects.get_private_posts().count())\n\n def tearDown(self):\n self.client.logout()\n",
"step-4": "from django.test import TestCase, Client\nfrom django.contrib.auth.models import User\nfrom blog.factories import BlogPostFactory, TagFactory\nfrom blog.models import BlogPost\nfrom faker import Factory\nfaker = Factory.create()\n\n\nclass ServicesTests(TestCase):\n\n def setUp(self):\n self.tag = TagFactory()\n self.blog_post = BlogPostFactory()\n self.client = Client()\n self.user = User.objects.create_user(username=faker.name(),\n password='Ivoepanda')\n\n def test_create_new_post_service_if_not_logged_user(self):\n self.client.logout()\n response = self.client.get('/create-new-post/')\n self.assertEqual(302, response.status_code)\n\n def test_create_new_post_service_if_logged_user(self):\n self.client.login(username=self.user.username, password='Ivoepanda')\n self.assertEqual(1, BlogPost.objects.get_private_posts().count())\n response = self.client.post('/create-new-post/', data={'title':\n faker.name(), 'content': faker.word(), 'author': self.user,\n 'tags': self.tag})\n self.assertEqual(302, response.status_code)\n self.assertEqual(2, BlogPost.objects.get_private_posts().count())\n\n def tearDown(self):\n self.client.logout()\n",
"step-5": "from django.test import TestCase, Client\nfrom django.contrib.auth.models import User\nfrom blog.factories import BlogPostFactory, TagFactory\nfrom blog.models import BlogPost\n\nfrom faker import Factory\n\nfaker = Factory.create()\n\n\nclass ServicesTests(TestCase):\n def setUp(self):\n self.tag = TagFactory()\n self.blog_post = BlogPostFactory()\n self.client = Client()\n self.user = User.objects.create_user(username=faker.name(), password='Ivoepanda')\n\n def test_create_new_post_service_if_not_logged_user(self):\n self.client.logout()\n response = self.client.get('/create-new-post/')\n self.assertEqual(302, response.status_code)\n\n def test_create_new_post_service_if_logged_user(self):\n self.client.login(username=self.user.username, password='Ivoepanda')\n self.assertEqual(1, BlogPost.objects.get_private_posts().count())\n\n response = self.client.post('/create-new-post/',\n data={\"title\": faker.name(),\n \"content\": faker.word(),\n \"author\": self.user,\n \"tags\": self.tag})\n self.assertEqual(302, response.status_code)\n self.assertEqual(2, BlogPost.objects.get_private_posts().count())\n\n def tearDown(self):\n self.client.logout()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class ProfileInline(admin.StackedInline):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class UserAdmin(BaseUserAdmin):
inlines = ProfileInline,
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
inlines = ProfileInline,
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
inlines = ProfileInline,
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Thread)
admin.site.register(Comment)
admin.site.register(Experience)
admin.site.register(ThreadTag)
admin.site.register(ExperienceTag)
admin.site.register(UserProfile)
admin.site.register(ExperiencesLike)
admin.site.register(ExperiencesDislike)
admin.site.register(Like)
admin.site.register(Dislike)
admin.site.register(Toolbox)
admin.site.register(ToolboxUser)
admin.site.register(Question)
admin.site.register(Answer)
<|reserved_special_token_1|>
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from app.models import *
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
inlines = ProfileInline,
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Thread)
admin.site.register(Comment)
admin.site.register(Experience)
admin.site.register(ThreadTag)
admin.site.register(ExperienceTag)
admin.site.register(UserProfile)
admin.site.register(ExperiencesLike)
admin.site.register(ExperiencesDislike)
admin.site.register(Like)
admin.site.register(Dislike)
admin.site.register(Toolbox)
admin.site.register(ToolboxUser)
admin.site.register(Question)
admin.site.register(Answer)
<|reserved_special_token_1|>
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from app.models import *
# Register your models here.
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Thread)
admin.site.register(Comment)
admin.site.register(Experience)
admin.site.register(ThreadTag)
admin.site.register(ExperienceTag)
admin.site.register(UserProfile)
admin.site.register(ExperiencesLike)
admin.site.register(ExperiencesDislike)
admin.site.register(Like)
admin.site.register(Dislike)
admin.site.register(Toolbox)
admin.site.register(ToolboxUser)
admin.site.register(Question)
admin.site.register(Answer)
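# The StackedInline above presumes UserProfile carries a one-to-one link back
# to User; a minimal sketch of the assumed model (the real definition lives in
# app/models.py, pulled in by the star import):
#
#     class UserProfile(models.Model):
#         user = models.OneToOneField(User, on_delete=models.CASCADE)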
|
flexible
|
{
"blob_id": "a9f3d5f11a9f2781571029b54d54b41d9f1f83b3",
"index": 592,
"step-1": "<mask token>\n\n\nclass ProfileInline(admin.StackedInline):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)\n",
"step-4": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\nfrom app.models import *\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)\n",
"step-5": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\n\nfrom app.models import *\n\n# Register your models here.\n\nclass ProfileInline(admin.StackedInline):\n\tmodel = UserProfile\n\tcan_delete = False\n\tverbose_name_plural = 'profile'\n\nclass UserAdmin(BaseUserAdmin):\n\tinlines = (ProfileInline, )\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/software/python-2.7-2014q3-el6-x86_64/bin/python
import SNANA_Reader as simread
import REAL_Reader as dataread
#import astropy.cosmology as cosmo
import traceback
import scipy
import scipy.stats as stats
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
#import Cosmology
import scipy.stats.mstats as mstats
from scipy.interpolate import UnivariateSpline
from sys import argv
import glob
import time
import os
import gzip
import shutil
import numpy.ma as ma
import subprocess
import iminuit as iM
from iminuit import Minuit as M
from discreteChi2Func import discreteChi2Func as chi2func
import pandas as pd
class Rate_Fitter:
def __init__(self, realfilename, realName, simfilename, simName, simgenfilename, MCBeta, MCK, zminSamp=0.1, zmaxSamp=1.20 , zminFit = 0.1, zmaxFit = 1.20, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95, Rate_Model = 'powerlaw', cheatType = False, cheatZ = False, cheatCCSub = False, cheatCCScale = False, cuts = None, nprint = 5, MURESCuts = None, noCCMC = False, priorRate = None, priorZEff = None, ratePriorErrUp = None, ratePriorErrDown =None, ratePriorErrAll = None, fixCCScale = False):
print "Rate_Fitter"
print "np version {0}".format(np.__version__)
self.zminSamp = zminSamp
self.zmaxSamp = zmaxSamp
self.zminFit = zminFit
self.zmaxFit = zmaxFit
self.MCBeta = MCBeta
self.MCK = MCK
self.Rate_Model = Rate_Model
self.cheatType = cheatType
self.cheatZ = cheatZ
self.cheatCCSub = cheatCCSub
self.cheatCCScale = cheatCCScale
self.cuts = cuts
self.nprint = nprint
self.MURESCuts = MURESCuts
self.priorRate = priorRate
self.priorZEff = priorZEff
self.ratePriorErrUp = ratePriorErrUp
self.ratePriorErrDown = ratePriorErrDown
self.ratePriorErrAll = ratePriorErrAll
self.fixCCScale = fixCCScale
#print "PRIORS"
#print priorRate
#print priorZEff
#print ratePriorErrUp
#print ratePriorErrDown
if self.cheatZ:
self.ztype = 'SIM_ZCMB'
else:
#self.ztype = 'zHD'
self.ztype = 'zPHOT'
self.shiftFlagData = False
self.shiftFlagSim = False
self.globalChi2Storage = []
self.globalNDataStorage = []
'''
self.globalZPhotBinStorage = []
self.globalNDataIaPhotBinStorage = []
self.globalNDataCCPhotBinStorage = []
self.globalZTrueBinStorage = []
self.globalNDataIaTrueBinStorage = []
self.globalNDataCCTrueBinStorage = []
'''
print 'a'
try:
self.simcat = simread.SNANA_Cat(simfilename, simName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95)
except:
try:
self.simcat = simread.SNANA_Cat(simfilename, simName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95, skip_header = 5)
except:
self.simcat = simread.SNANA_Cat(simfilename, simName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95, skip_header = 6)
print "Sim photometric catalog loaded"
self.simName = simName
self.simgencat = simread.SNANA_Cat(simfilename, simName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95)
print "Sim generation catalog initialized"
try:
#with np.load(simgenfilename+'.npz', allow_pickle = True) as data0:
# SIMGEN = data0['a']
SIMGEN = np.load(simgenfilename + '.npy', allow_pickle = True)
except:
SIMGEN = np.genfromtxt(simgenfilename, dtype=None, names = True, skip_footer=3, invalid_raise=False)
print "Compress save A"
SIMGEN.dtype.names = map(str, SIMGEN.dtype.names)
#np.savez_compressed(simgenfilename+'.npz', a = SIMGEN)
np.save(simgenfilename+'.npy', SIMGEN)
print "WHY DO YOU HATE ME WHEN I SHOW YOU NOTHING BUT LOVE"
print simgenfilename
#SIMGEN = pd.read_csv(simgenfilename, delim_whitespace=True, comment="#").to_records(index = False)
print "Cleaning duplicate header rows from SIMGEN dump"
SIMGEN = SIMGEN[SIMGEN['GENZ'] != 'GENZ']
self.simgencat.params = {'flat':True, 'H0': simH0, 'Om0':simOmegaM, 'Ob0': simOb0, 'sigma8': simSigma8, 'ns': simNs}
#self.simgencat.cosmo = Cosmology.setCosmology('simCosmo', self.simcat.params)
self.simgencat.OrigCatalog = np.copy(SIMGEN)
self.simgencat.Catalog = np.copy(SIMGEN)
self.simgencat.Catalog = self.simgencat.Catalog[self.simgencat.Catalog['GENZ'] != 'GENZ']
self.simgencat.simname = simName
# 'GENZ' is a field of a 1-D structured array, so shape[0] is the event count
self.simgencat.NSN = self.simgencat.Catalog['GENZ'].shape[0]
print "SIMGEN NUMBER"
print self.simgencat.NSN
print "SIMGENCAT FILE"
print simfilename
self.realName = realName
try:
print "Reading data catalog (skip_header = 6)"
self.realcat = simread.SNANA_Cat(realfilename, realName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95, skip_header = 6)
except:
#self.realcat = simread.SNANA_Cat(realfilename, realName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95)
try:
print "Retrying data catalog read with default header"
self.realcat = simread.SNANA_Cat(realfilename, realName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95)
except:
print "Falling back to REAL_Cat reader (skip_header = 11)"
self.realcat = dataread.REAL_Cat(realfilename, realName, skip_header =11 )
if self.cheatType:
print "WARNING, THE FITTER IS CHEATING AND ELIMINATED NON-IAs USING SIM INFO"
self.realcat.Catalog = self.realcat.Catalog[self.realcat.Catalog['SIM_TYPE_INDEX'].astype(int) == 1]
self.simcat.Catalog = self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'].astype(int) == 1]
print "Pre cut Catalog"
print self.realcat.Catalog.shape
for cut in cuts:
print "Applying cut; pre-cut shape:"
print cut
print self.realcat.Catalog.shape
self.realcat.Catalog = self.realcat.Catalog[(self.realcat.Catalog[cut[0]].astype(type(cut[1])) > cut[1]) & (self.realcat.Catalog[cut[0]].astype(type(cut[2])) < cut[2])]
self.simcat.Catalog = self.simcat.Catalog[(self.simcat.Catalog[cut[0]].astype(type(cut[1])) > cut[1]) & (self.simcat.Catalog[cut[0]].astype(type(cut[2])) < cut[2])]
print "Post-cut shape:"
print cut
print self.realcat.Catalog.shape
self.postCutRealCat = np.copy(self.realcat.Catalog)
self.postCutSimCat = np.copy(self.simcat.Catalog)
self.realcat.Catalog = self.realcat.Catalog[(self.realcat.Catalog[self.ztype].astype(float) > self.zminSamp) & (self.realcat.Catalog[self.ztype].astype(float) < self.zmaxSamp)]
self.simcat.Catalog = self.simcat.Catalog[(self.simcat.Catalog[self.ztype].astype(float) > self.zminSamp) & (self.simcat.Catalog[self.ztype].astype(float) < self.zmaxSamp)]
print 'zCut Pre MURESCut'
print np.sum((self.realcat.Catalog[self.ztype].astype(float) > self.zminFit) & (self.realcat.Catalog[self.ztype].astype(float) < self.zmaxFit))
print 'MURESCUT'
print self.MURESCuts
print self.realcat.Catalog.shape
if not (self.MURESCuts is None):
'''
#MURES Cut format: (zmin, zmax, neg Cut, pos Cut)
for mc in self.MURESCuts:
realCond = (self.realcat.Catalog[self.ztype] < mc[0]) | (self.realcat.Catalog[self.ztype] > mc[1])| ((self.realcat.Catalog['MURES'] > mc[2])& (self.realcat.Catalog['MURES'] < mc[3]))
simCond = (self.simcat.Catalog[self.ztype] < mc[0]) | (self.simcat.Catalog[self.ztype] > mc[1])| ((self.simcat.Catalog['MURES'] > mc[2])& (self.simcat.Catalog['MURES'] < mc[3]))
self.realcat.Catalog = self.realcat.Catalog[realCond]
self.simcat.Catalog = self.simcat.Catalog[simCond]
'''
self.realcat.Catalog = self.realcat.Catalog[ np.abs( self.realcat.Catalog['MURES'] * 1.0 / self.realcat.Catalog['MUERR'] ) < MURESCuts]
self.simcat.Catalog = self.simcat.Catalog[ np.abs( self.simcat.Catalog['MURES'] * 1.0 / self.simcat.Catalog['MUERR'] ) < MURESCuts]
print "PostMURESCut Shape"
print self.realcat.Catalog.shape
print 'zCut Post MURESCut'
print np.sum((self.realcat.Catalog[self.ztype].astype(float) > self.zminFit) & (self.realcat.Catalog[self.ztype].astype(float) < self.zmaxFit))
print "Post cut Catalog"
print self.realcat.Catalog.shape
if noCCMC:
self.simgencat.Catalog = self.simgencat.Catalog[self.simgencat.Catalog['GENTYPE'] == 1]
self.simcat.Catalog = self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'] == 1]
def newData(self, realfilename, realName, simInd =100):
self.realName = realName
self.shiftFlagData = False
try:
self.realcat = simread.SNANA_Cat(realfilename, realName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95)
except:
self.realcat = simread.SNANA_Cat(realfilename, realName, simOmegaM=0.3, simOmegaL=0.7, simH0=70.0, simw=-1.0, simOb0=0.049, simSigma8=0.81, simNs=0.95, skip_header = 6 )
if self.cheatType:
print "WARNING, THE FITTER IS CHEATING AND ELIMINATED NON-IAs USING SIM INFO"
self.realcat.Catalog = self.realcat.Catalog[self.realcat.Catalog['SIM_TYPE_INDEX'].astype(int) == 1]
if simInd < self.nprint:
print 'N precuts'
print self.realcat.Catalog['FITPROB'].shape
print "Pre cut Catalog"
print self.realcat.Catalog.shape
for cut in cuts:
self.realcat.Catalog = self.realcat.Catalog[(self.realcat.Catalog[cut[0]].astype(type(cut[1])) > cut[1]) & (self.realcat.Catalog[cut[0]].astype(type(cut[2])) < cut[2])]
self.realcat.Catalog = self.realcat.Catalog[(self.realcat.Catalog[self.ztype].astype(float) > self.zminSamp) & (self.realcat.Catalog[self.ztype].astype(float) < self.zmaxSamp)]
print "Post cut Catalog"
print self.realcat.Catalog.shape
self.postCutRealCat = np.copy(self.realcat.Catalog)
print 'MURESCUT'
print self.MURESCuts
print self.realcat.Catalog.shape
if not (self.MURESCuts is None):
#MURES Cut format: (zmin, zmax, neg Cut, pos Cut)
'''
for mc in self.MURESCuts:
realCond = (self.realcat.Catalog[self.ztype] < mc[0]) | (self.realcat.Catalog[self.ztype] > mc[1])| ((self.realcat.Catalog['MURES'] > mc[2])& (self.realcat.Catalog['MURES'] < mc[3]))
self.realcat.Catalog = self.realcat.Catalog[realCond]
'''
self.realcat.Catalog = self.realcat.Catalog[np.abs(self.realcat.Catalog['MURES']*1.0/self.realcat.Catalog['MUERR']) < MURESCuts]
print "PostMURESCut Shape"
print self.realcat.Catalog.shape
if simInd < self.nprint:
print "Minimum Fitprob"
print np.min(self.realcat.Catalog['FITPROB'])
print 'N postcuts'
print self.realcat.Catalog['FITPROB'].shape
def zSystematic(self, binList = None, nbins = None):
assert(0) # disabled: this z-bias correction is not currently used
if nbins is None:
try:
self.nbins = len(binList) - 1
self.binList = binList
except:
self.nbins = binList.shape[0] - 1
self.binList = binList
else:
binList = np.linspace(self.zmin, self.zmax, nbins+1)
self.nbins = nbins
self.binList = binList
if self.shiftFlagData:
print "DONT DOUBLE SHIFT"
return 0
if not self.shiftFlagSim:
oldsimz = self.simcat.Catalog['zPHOT']
oldsimtruez = self.simcat.Catalog['SIM_ZCMB']
stat, bins, binnum = stats.binned_statistic(oldsimz, oldsimz - oldsimtruez, bins = self.binList, statistic = 'mean')
self.zBiasShifts = stat
newsimz = oldsimz - stat[binnum]
assert(np.sum(np.abs(newsimz - oldsimz)) > 0)
self.shiftFlagSim = True
oldz = self.realcat.Catalog['zPHOT']
_,_, binnum = stats.binned_statistic(oldz, oldz , bins = self.binList, statistic = 'mean')
newz = oldz - self.zBiasShifts[binnum]
oldzshape = oldz.shape[0]
self.realcat.Catalog['zPHOT'].put(np.arange(0, oldz.shape[0]), newz)
assert(np.sum(np.abs(newz - oldz)) > 0)
assert((oldzshape - np.arange(0, oldz.shape[0]).shape[0])< 1)
self.simFlagData = True
def effCalc(self, fracContamCut = 0.0, nbinsSamp = None, nbinsFit = None, binListSamp = None, binListFit = None, simInd =100):
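# Explanatory sketch (added): effCalc builds the migration/efficiency matrix.
# For sim SNe Ia it 2D-histograms (zPHOT, SIM_ZCMB) over the fit and sample
# binnings, then divides each true-z column by the number of events generated
# in that true-z bin, so effmat[i][j] ~ P(observed in phot bin i | generated
# in true bin j).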
#### Do we want SNIas or all SN for efficiency?
import matplotlib as mpl
if nbinsSamp is None:
try:
self.nbinsSamp = len(binListSamp) - 1
self.binListSamp = binListSamp
except:
self.nbinsSamp = binListSamp.shape[0] - 1
self.binListSamp = binListSamp
else:
binListSamp = np.linspace(self.zminSamp, self.zmaxSamp, nbinsSamp+1)
self.nbinsSamp = nbinsSamp
self.binListSamp = binListSamp
if nbinsFit is None:
try:
self.nbinsFit = len(binListFit) - 1
self.binListFit = binListFit
except:
self.nbinsFit = binListFit.shape[0] - 1
self.binListFit = binListFit
else:
binListFit = np.linspace(self.zminFit, self.zmaxFit, nbinsFit+1)
self.nbinsFit = nbinsFit
self.binListFit = binListFit
self.typeString = ''
#if self.cheatZ:
# self.ztype = 'SIM_ZCMB'
#else:
# self.ztype = 'zPHOT'
'''
if (fracContamCut > 0.000000001) & (fracContamCut < 1.0):
print " Cutting based on Frac Contam"
histTot, binsX, binsY = np.histogram2d(self.simcat.Catalog[ztype], self.simcat.Catalog['MURES'], bins = nbins)
histCC, binsX, binsY = np.histogram2d(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'].astype(int) != 1][ztype], self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'].astype(int) != 1]['MURES'], bins = (binsX, binsY))
fracContam = histCC.astype(np.float)/histTot.astype(np.float)
for fcRow, i in zip(fracContam, xrange(binsX.shape[0])):
for fc, j in zip(fcRow, xrange(binsY.shape[0])):
if fc < fracContamCut:
continue
else:
simInBin = (self.simcat.Catalog[ztype] > binsX[i]) & (self.simcat.Catalog[ztype] < binsX[i+1]) & (self.simcat.Catalog['MURES'] > binsY[j]) & (self.simcat.Catalog['MURES'] < binsY[j+1])
realInBin = (self.realcat.Catalog[ztype] > binsX[i]) & (self.realcat.Catalog[ztype] < binsX[i+1]) & (self.realcat.Catalog['MURES'] > binsY[j]) & (self.realcat.Catalog['MURES'] < binsY[j+1])
self.simcat.Catalog = self.simcat.Catalog[np.invert(simInBin)]
self.realcat.Catalog = self.realcat.Catalog[np.invert(realInBin)]
'''
zPHOTs = self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'].astype(int) == 1][self.ztype].astype(float)
zTRUEs = self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'].astype(int) == 1]['SIM_ZCMB'].astype(float)
self.typeString = self.typeString + 'A1'
print "Type Location A"
print "Choice A1"
print zPHOTs.shape
print zTRUEs.shape
print self.binListFit
print self.binListSamp
counts, zPhotEdges, zTrueEdges, binnumber = scipy.stats.binned_statistic_2d(zPHOTs, zTRUEs, zTRUEs, statistic = 'count', bins = (self.binListFit, self.binListSamp))
assert(zPhotEdges.shape[0] == (self.nbinsFit + 1))
print "Type Location B"
print "Choice B1"
self.typeString = self.typeString + 'B1'
zGenHist, zGenBins = np.histogram(self.simgencat.Catalog[self.simgencat.Catalog['GENTYPE'].astype(int) == 1]['GENZ'].astype(float), bins = self.binListSamp)
#zSim1Hist, zSim1Bins = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'].astype(int) ==1]['SIM_ZCMB'].astype(float), bins = self.binListSamp)
print "counts of zTrue in each zPhot vs zTrue bin"
print counts.astype(int)
print "zGen Bins"
print zGenBins
print 'zGen Histogram'
print zGenHist
print "sum zGen events"
print np.sum(zGenHist)
print "sum zPhot events"
print np.sum(counts)
#print "DEBUG HERE"
#assert(0)
self.effmat = np.zeros((self.nbinsFit, self.nbinsSamp))
xMax = zPhotEdges.shape[0] - 2
yMax = zTrueEdges.shape[0] - 2
print zGenHist
print counts.astype(int)
'''
for zPhotLedge, zPhotRedge, row, i in zip(zPhotEdges[:-1], zPhotEdges[1:], counts, xrange(xMax + 1)):
zPhotCenter = (zPhotLedge + zPhotRedge)/2.0
for zTrueLedge, zTrueRedge, count, j in zip(zTrueEdges[:-1], zTrueEdges[1:], row, xrange(yMax + 1)):
zTrueCenter = (zTrueLedge + zTrueRedge)/2.0
inCell = (zPHOTs > zPhotLedge) & (zPHOTs < zPhotRedge) & (zTRUEs > zTrueLedge)& (zTRUEs < zTrueRedge)
zPhotCell = zPHOTs[inCell];zTrueCell = zTRUEs[inCell]
self.effmat[i][j] = count # np.sum(inCell)
#print "inCell"
#print np.sum(inCell)
#print "count"
#print count
#try:
# assert(np.abs(np.sum(inCell) - count) < 2)
#except:
# print "CHECK ABOVE"
for row, i in zip(self.effmat, xrange(self.effmat.shape[0])):
for j in xrange(row.shape[0]):
self.effmat[i][j] /= zGenHist[j]
'''
self.effmat = counts/zGenHist
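# Note (added): this vectorized division reproduces the commented-out double
# loop above; counts has shape (nbinsFit, nbinsSamp) and zGenHist broadcasts
# along the true-z (sample) axis.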
#if simInd < self.nprint:
print 'effmat'
print self.effmat
extent = [zPhotEdges[0], zPhotEdges[-1], zTrueEdges[0], zTrueEdges[-1]]
if (simInd == 0) or (not ('sim' in self.realName.lower())):
plt.figure()
plt.imshow(np.flipud(counts.T), extent = extent, cmap = 'Blues')
plt.colorbar()
plt.savefig(self.realName + 'redshiftDistro.png')
plt.clf()
plt.close()
plt.figure()
plt.imshow(np.flipud(self.effmat.T), extent = extent, cmap = 'Blues', norm=mpl.colors.LogNorm())
plt.colorbar()
plt.savefig(self.realName + 'efficiencyMatrixLog.png')
plt.clf()
plt.close()
plt.figure()
plt.imshow(np.flipud(self.effmat.T), extent = extent, cmap = 'Blues')
plt.colorbar()
plt.savefig(self.realName + 'efficiencyMatrix.png')
plt.clf()
plt.close()
def fit_rate(self, fixK = False, fixBeta = False, simInd =100, trueBeta = 0, CCScale = 1.0, CCScaleErr = None, TrueCCScale = 1.0, BetaInit = 0.0, kInit = 1.0, BetaErr = 1, kErr = 1, f_Js = None, CCZbins = None, scaleZBins = None, Blind = False):
#import iminuit as iM
#from iminuit import Minuit as M
#import numpy as np
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#if self.cheatZ:
# self.ztype = 'SIM_ZCMB'
#else:
# self.ztype = 'zPHOT'
plt.switch_backend('Agg')
if simInd < self.nprint:
print "Type Location C"
print "Choice C1"
if len(self.typeString) <= 4:
self.typeString = self.typeString + 'C1'
nSim, simBins = np.histogram(self.simgencat.Catalog[self.simgencat.Catalog['GENTYPE'].astype(int) == 1]['GENZ'].astype(float), bins=self.binListSamp)
if simInd < self.nprint:
print "nSim1"
print nSim
print self.simgencat.Catalog.shape
print "FIGURE OUT WHY YOU MADE THIS ASSERT STATEMENT LATER"
#assert(0)
nSim2, simBins2 = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'].astype(int) ==1][self.ztype].astype(float), bins=self.binListFit)
nSim3, simBins3 = np.histogram(self.simcat.Catalog[self.ztype].astype(float), bins=self.binListFit)
NCC , _ = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'] != 1][self.ztype].astype(float), bins=self.binListFit)
if self.fixCCScale:
print "Fix CC Scale at 1"
else:
if simInd < self.nprint:
print "nSim2"
print nSim2
print "nSim3"
print nSim3
print "nCC"
print NCC
OrigNCC = np.copy(NCC)
if self.cheatCCSub:
if self.cheatCCScale:
print "WARNING: Only cheating on CC Subtraction not scale"
print "Setting NCC to infinity to make sure that cheating correctly"
print "Diagnostics after this point may be nonsense"
print self.cheatCCSub
print "NCC before inflation"
print NCC
NCC = NCC*1E100
print "NCC after inflation"
print NCC
elif self.cheatCCScale:
print "NCC Before1"
print NCC
print TrueCCScale
NCC = applyCCScale(NCC, TrueCCScale, CCScaleErr, zbins = CCZbins, datazbins = self.binListFit)
print "NCC After1"
print NCC
else:
print "NCC Before2"
print NCC
print CCScale
NCC = applyCCScale(NCC, CCScale, CCScaleErr, zbins = CCZbins, datazbins = self.binListFit)
print "NCC After2"
print NCC
#assert(0)
NIa , _ = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'] == 1][self.ztype].astype(float), bins=self.binListFit)
'''
DebugNIaPhot, _ = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'] == 1]['zPHOT'].astype(float), bins=self.binListFit)
DebugNCCPhot, _ = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'] != 1]['zPHOT'].astype(float), bins=self.binListFit)
DebugNCCPhot = applyCCScale(DebugNCCPhot, CCScale, CCScaleErr, zbins = scaleZBins, datazbins = self.binListFit)
DebugNIaTrue, _ = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'] == 1]['SIM_ZCMB'].astype(float), bins=self.binListSamp)
DebugNCCTrue, _ = np.histogram(self.simcat.Catalog[self.simcat.Catalog['SIM_TYPE_INDEX'] != 1]['SIM_ZCMB'].astype(float), bins=self.binListSamp)
DebugNCCTrue = applyCCScale(DebugNCCTrue, CCScale, CCScaleErr, zbins = scaleZBins, datazbins = self.binListSamp)
uselessCtr = 0
for niap, nccp, niat, ncct, zp, zt in zip(DebugNIaPhot, DebugNCCPhot, DebugNIaTrue, DebugNCCTrue,(self.binListFit[1:] + self.binListFit[:-1])/2.0, (self.binListSamp[1:] + self.binListSamp[:-1])/2.0 ):
uselessCtr +=1
self.globalZTrueBinStorage.append(zt)
self.globalZPhotBinStorage.append(zp)
self.globalNDataIaPhotBinStorage.append(niap)
self.globalNDataCCPhotBinStorage.append(nccp)
self.globalNDataIaTrueBinStorage.append(niat)
self.globalNDataCCTrueBinStorage.append(ncct)
print "UselessCtr"
print uselessCtr
'''
try:
TrueNCC, _ = np.histogram(self.realcat.Catalog[self.realcat.Catalog['SIM_TYPE_INDEX'] !=1][self.ztype].astype(float), bins=self.binListFit)
if simInd < self.nprint:
print "True NCC Data"
print TrueNCC
except:
print "Using real data"
TrueNCC = 0.0
nData, dataBins = np.histogram(self.realcat.Catalog[self.ztype].astype(float), bins=self.binListFit)
print "nData"
print nData
if not(self.cheatCCSub):
FracBad = NCC*1.0/(1.0*(NCC+NIa))
nCCData = nData*FracBad
else:
nCCData = TrueNCC*1.0
FracBad = TrueNCC*1.0/nData
if simInd < self.nprint:
print "PreScale NCC/nSim"
print OrigNCC*1.0/(OrigNCC+NIa)
print "PreScale Pred NCC Data"
print OrigNCC*1.0/(OrigNCC+NIa)*nData
print "PreScale Pred NCC Data if 2NCC"
print OrigNCC*2.0/(2.0*OrigNCC+NIa)*nData
print "TrueNCC"
print TrueNCC
if not np.isscalar(TrueNCC): # TrueNCC is an array for sim data, 0.0 for real data
if simInd < self.nprint:
print "PreScale PredNCCData - TrueNCCData"
print OrigNCC*2.0/(2.0*OrigNCC+NIa)*nData - TrueNCC
print "PreScale PredNCCData - TrueNCCData/ PredNCCData"
print (OrigNCC*2.0/(2.0*OrigNCC+NIa)*nData - TrueNCC)/(OrigNCC*2.0/(2.0*OrigNCC+NIa)*nData)
else:
print "Using real data"
print "Mean of PreScale PredNCCData - TrueNCCData/ PredNCCData"
print np.nanmean((OrigNCC*2.0/(2.0*OrigNCC+NIa)*nData - TrueNCC)/(OrigNCC*2.0/(2.0*OrigNCC+NIa)*nData))
print "PostScale NCC/nData"
print NCC*1.0/(NCC+NIa)
if simInd < self.nprint:
print "Fraction of CCs in each bin"
print FracBad
print 'NCC'
print NCC
print 'nSim2'
print nSim2
print "nData, dataBins, realcat shape pre contam correction"
print nData
print dataBins
print np.sum(self.realcat.Catalog[self.ztype].astype(float) > self.zmaxFit)
print np.sum(self.realcat.Catalog[self.ztype].astype(float) < self.zminFit)
print self.realcat.Catalog[self.ztype].shape
print "Ratio nData/nSim"
print 1.0*nData/(1.0*nSim3)
print "Ratio nSim2/nData"
print 1.0*nSim3/(1.0*nData)
print "FracBad"
print FracBad
print 'NCCData'
print nCCData
if simInd < self.nprint:
print "overall Contam"
print np.sum(NCC)*1.0/(np.sum(nSim3)*1.0)
def chi2func(nData, nSim, effmat, fnorm, zCentersSamp, zCentersFit, k = 1.0, Beta = 0.0, zBreak = 1.0, dump = False, complexdump = False, modelError = False, nIA = None, nCC = None, Rate_Model = 'powerlaw', zbins = None, simInd = 100, BetaPrior = (-3, 3), KPrior = (0.0, 50.0), priorRate = None, priorZEff = None, ratePriorErrUp = None, ratePriorErrDown =None, ratePriorErrAll = None, TrueNCCData = None, f_1 = 1.0, f_2 = 1.0, f_3 = 1.0, f_4 = 1.0, f_5 = 1.0, f_6 = 1.0, f_7 = 1.0, f_8 = 1.0, f_9 = 1.0, f_10 = 1.0, f_11 = 1.0):
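# Explanatory sketch (added): for each photometric bin i the statistic is
#   chi2_i = (nData_i - nCCData_i - sum_j eff_ij * f_j * fnorm * nSim_j)**2
#            / (max(nData_i, 1) + max(nCCData_i, 1) + sum_j eff_ij * (f_j*fnorm)**2 * nSim_j)
# where f_j is the rate rescaling in true-z bin j (k*(1+z_j)**Beta for the
# power-law model) and the denominator approximates data + CC + MC variance.
# Note: this nested definition shadows the chi2func imported at the top of
# the file.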
if simInd < self.nprint:
print "PRIORS2"
print priorRate
print priorZEff
print ratePriorErrUp
print ratePriorErrDown
Chi2Temp = 0.0
if Rate_Model == 'powerlaw':
f_Js = k*(1+zCentersSamp)**Beta
elif Rate_Model == 'discrete':
f_Js = np.array([f_1, f_2, f_3, f_4, f_5, f_6, f_7, f_8, f_9, f_10, f_11])
elif (Rate_Model == 'brokenpowerlaw') | (Rate_Model == 'brokenpowerlawVar'):
f_Js = []
#zCenters = (zbins[1:]+zbins[:-1])/2.0
temp = None
for zC in zCentersSamp:
if zC < zBreak:
f_Js.append(k*(1+zC)**Beta)
elif not(temp is None):
f_Js.append(temp)
else:
temp = f_Js[-1]
f_Js.append(temp)
f_Js = np.array(f_Js)
else:
assert(0)
if simInd < self.nprint:
if Rate_Model == 'discrete':
print "f_Js init"
print f_Js
else:
print "Beta init"
print Beta
print "k init"
print k
#chi2Mat = np.zeros((self.nbinsFit))
#adjNMC = np.zeros((self.nbinsFit))
if Rate_Model == 'discrete':
kprior = 0
betaprior = 0
else:
kprior = weakPrior(k, KPrior)
betaprior = weakPrior(Beta, BetaPrior)
if dump and (self.nprint > simInd):
print "kprior"
print kprior
print "betaprior"
print betaprior
if (nIA is None) or (nCC is None):
if dump:
print "No CC Cut"
fracCCData = np.zeros(nData.shape)
elif self.cheatCCSub:
fracCCData = TrueNCC*1.0/nData
else:
if Rate_Model == 'discrete':
if dump and (self.nprint > simInd):
print 'f_J adjusted CC Cut'
print Rate_Model
print nCC
print nIA
print np.array(f_Js)
fracCCData = (nCC*1.0)/((1.0*nCC + nIA*np.array(f_Js)))
print fracCCData
else:
if dump and (self.nprint > simInd):
print "Beta Adjusted CC Cut"
print Rate_Model
#BetaRatio = k*(1+zCenters)**(Beta)#/(1+zCenters)**MCBeta
BetaRatio = (1+zCentersFit)**(Beta)#/(1+zCenters)**MCBeta
if dump and (self.nprint > simInd):
print "Beta Ratio"
print BetaRatio
print "BadFracCCData"
print (nCC*1.0)/((1.0*nCC + nIA*BetaRatio))
print "bad NCCData"
print (nCC*1.0)/((1.0*nCC + nIA*BetaRatio))*nData
fracCCData = (nCC*1.0)/((1.0*nCC + nIA*BetaRatio))
if dump and (self.nprint > simInd):
print 'abc'
print "fracCCData2"
print fracCCData
print "unscaled fracCCData"
print (1.0*nCC)/(1.0*(nCC+nIA))
if self.cheatCCSub:
nCCData = TrueNCCData
if dump and (self.nprint < simInd):
print "Cheating CC Sub"
assert(not(TrueNCCData is None))
elif dump and (self.nprint > simInd):
print 'def'
print "Normal CC Sub"
if not self.cheatCCSub:
nCCData = nData*fracCCData
if dump and (self.nprint > simInd):
print "nCCData2"
print nCCData
if not(TrueNCCData is None):
print "TrueNCCData"
print TrueNCCData
#print f_Js
#Check if I am scaling errors down with increasing MC size. Make MC twice as large as "Data" to test.
if dump: chi2Storage = []
if dump: scaledNSimStor = []
if dump: JSumTempNumStor = []
if dump: JSumTempDenStor = []
if dump:
print "actually used NCC"
#print nCC
print nCCData
if dump and (simInd < self.nprint):
print "effmat"
print effmat
print "nData"
print nData
print "nCCData"
print nCCData
print "nSim"
print nSim
print nCCData
for row, nDataI, nCCDataI, i, zc in zip(effmat, nData, nCCData, range(self.nbinsFit), zCentersFit):
if dump and (self.nprint > simInd):
print 'effmat row'
print row
print 'nDataI'
print nDataI
print 'nCCDataI'
print nCCDataI
scaledNSimTemp = 0.0
JSumTempNum = 0.0
JSumTempDen = 0.0
if dump and (simInd < self.nprint):
print "nBinsSamp"
print self.nbinsSamp
assert(row.shape[0] == self.nbinsSamp)
assert(nSim.shape[0] == self.nbinsSamp)
assert(len(f_Js) == self.nbinsSamp)
for eff, nSimJ, f_J, j in zip(row, nSim, f_Js, range(self.nbinsSamp)):
if dump and (self.nprint > simInd):
print 'NGen J'
print nSimJ
print 'JSumTempNum contr'
print nSimJ*f_J*eff*fnorm
print 'JSumTempDen contr'
print nSimJ*f_J*eff*fnorm*f_J*fnorm
#if dump and (i != j) and self.cheatZ and (self.nprint < simInd):
# if nSimJ*f_J*eff*fnorm > 0:
# print " This should be zero but isnt "
# print nSimJ*f_J*eff*fnorm
# assert(0)
JSumTempNum += nSimJ*f_J*eff*fnorm
JSumTempDen += nSimJ*f_J*eff*fnorm*f_J*fnorm
dataFunc = np.maximum(nDataI ,1)
#CCFunc = np.ceil(np.maximum(nCCDataI, 1))
CCFunc = np.maximum(nCCDataI, 1)
c2t = (nDataI - nCCDataI - JSumTempNum)**2/( dataFunc + CCFunc + JSumTempDen)
if dump:
JSumTempNumStor.append(JSumTempNum)
JSumTempDenStor.append(JSumTempDen)
if dump and (self.nprint > simInd):
print i
print 'nDataI'
print nDataI
print 'fnCCDataI'
print nCCDataI
print 'fnorm'
print fnorm
print "JSumTempNum tot"
print JSumTempNum
print "JSumTempDen tot"
print JSumTempDen
print "Chi2Bin"
print c2t
if dump:
chi2Storage.append(c2t)
if c2t > 5:
print 'WARNING: per-bin chi2 > 5; check the dump above'
# Chi2Temp += ((nDataI - nCCDataI - JSumTempNum)**2/(JSumTempNum + JSumTempDen))#*fnorm**2
if nDataI > 1E-11 or JSumTempDen > 1E-11:
Chi2Temp += c2t
if dump and (self.nprint > simInd):
print "JSumTempNum/Den"
print JSumTempNumStor
print JSumTempDenStor
if dump:
if (self.nprint >simInd):
print Chi2Temp
print kprior
print betaprior
print chi2Storage
print "nData"
print nData
print "nCCData"
print nCCData
if priorRate is None:
return Chi2Temp+kprior+betaprior , chi2Storage
else:
print "PRIORS3"
print priorRate
print "fit k"
print k
print 'MCK'
print self.MCK
print "fit beta"
print Beta
print 'MCBeta'
print self.MCBeta
print ratePrior(k*self.MCK, Beta + self.MCBeta, priorRate, priorZEff, ratePriorErrUp, ratePriorErrDown, ratePriorErrAll)
return Chi2Temp+kprior+betaprior + ratePrior(k*self.MCK, Beta+self.MCBeta, priorRate, priorZEff, ratePriorErrUp, ratePriorErrDown, ratePriorErrAll), chi2Storage
else:
if dump and (self.nprint > simInd):
print 'C2T'
print Chi2Temp
print kprior
print betaprior
if priorRate is None:
return Chi2Temp+kprior+betaprior
else:
print "PRIORS3"
print priorRate
print "fit k"
print k
print 'MCK'
print self.MCK
print "fit beta"
print Beta
print 'MCBeta'
print self.MCBeta
print ratePrior(k*self.MCK, Beta+self.MCBeta, priorRate, priorZEff, ratePriorErrUp, ratePriorErrDown, ratePriorErrAll)
return Chi2Temp+kprior+betaprior + ratePrior(k*self.MCK, Beta+self.MCBeta, priorRate, priorZEff, ratePriorErrUp, ratePriorErrDown, ratePriorErrAll)
zCentersSamp = (self.binListSamp[1:] + self.binListSamp[:-1])/2.0
zCentersFit = (self.binListFit[1:] + self.binListFit[:-1])/2.0
#Is this right? Everything else in the other side of the chi2 function should be Ia only
if self.cheatCCSub:
self.fracCCData = TrueNCC*1.0/nData
else:
self.fracCCData = (NCC*1.0)/(1.0*(NCC + NIa))
if (self.nprint > simInd):
print "nSim"
print nSim
print 'fracCCData'
print self.fracCCData
print "nData"
print nData
#fnorm = float(np.sum(nData*(1-self.fracCCData)))/float(np.sum(nSim))
fnorm = 1.0/240.0
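# Note (added): fnorm is the MC-to-data normalization, hardcoded here to
# 1/240 (presumably the generated-MC to data volume ratio); the commented
# line above shows the data-driven alternative.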
#print "PRIORS"
#print self.priorZEff
#print self.priorRate
#print self.ratePriorErrUp
#print self.ratePriorErrDown
if self.Rate_Model == 'powerlaw':
lamChi2 = lambda k, Beta: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, k, Beta, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit)
lamChi2Dump = lambda k, Beta: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, k, Beta, dump = True, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit)
MinObj = M(lamChi2, k = kInit, error_k = kErr , Beta = BetaInit, error_Beta = BetaErr, limit_k = (0.0, None), limit_Beta = (-100, 100), fix_k = fixK, fix_Beta = fixBeta)
c2i, _ = lamChi2Dump(1.0, 0.0)
print "Chi2 init = {0}".format(round(c2i, 4))
elif self.Rate_Model == 'brokenpowerlaw':
lamChi2 = lambda k, Beta: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, k, Beta, 1.0, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, Rate_Model = 'brokenpowerlaw', priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit)
lamChi2Dump = lambda k, Beta: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, k, Beta, 1.0, dump = True, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, Rate_Model = 'brokenpowerlaw', priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit)
MinObj = M(lamChi2, k = kInit, error_k = kErr , Beta = BetaInit, error_Beta = BetaErr, limit_k = (0.0, None), limit_Beta = (-100, 100), fix_k = fixK, fix_Beta = fixBeta)
c2i, _ = lamChi2Dump(1.0, 0.0)
print "Chi2 init = {0}".format(round(c2i, 4))
elif self.Rate_Model == 'brokenpowerlawVar':
lamChi2 = lambda k, Beta, zBreak: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, k, Beta, zBreak, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, Rate_Model = 'brokenpowerlawVar', priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit)
lamChi2Dump = lambda k, Beta, zBreak: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, k, Beta, zBreak, dump = True, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, Rate_Model = 'brokenpowerlawVar', priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit)
MinObj = M(lamChi2, k = kInit, error_k = kErr , Beta = BetaInit, error_Beta = BetaErr, limit_k = (0.0, None), limit_Beta = (-100, 100), fix_k = fixK, fix_Beta = fixBeta, zBreak = 1.0, error_zBreak = 0.1, limit_zBreak = (self.zminFit, self.zmaxFit))
c2i, _ = lamChi2Dump(1.0, 0.0, 1.0) # zBreak init = 1.0
print "Chi2 init = {0}".format(round(c2i, 4))
elif self.Rate_Model == 'discrete':
lamChi2 = lambda f_1, f_2, f_3, f_4, f_5, f_6, f_7, f_8, f_9, f_10, f_11: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, 1.0, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, f_1 = f_1, f_2 = f_2,f_3 = f_3, f_4 = f_4,f_5 = f_5, f_6 = f_6,f_7 = f_7, f_8 = f_8,f_9 = f_9, f_10 = f_10, f_11 = f_11, Rate_Model = 'discrete', priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit )
lamChi2Dump = lambda f_1, f_2, f_3, f_4, f_5, f_6, f_7, f_8, f_9, f_10, f_11: chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, 1.0, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC, f_1 = f_1, f_2 = f_2,f_3 = f_3, f_4 = f_4,f_5 = f_5, f_6 = f_6,f_7 = f_7, f_8 = f_8,f_9 = f_9, f_10 = f_10, f_11 = f_11, dump = True, Rate_Model = 'discrete', priorRate = self.priorRate, priorZEff = self.priorZEff, ratePriorErrUp = self.ratePriorErrUp, ratePriorErrDown =self.ratePriorErrDown, ratePriorErrAll = self.ratePriorErrAll)#, zbins = self.binListFit)
c2i, _ = lamChi2Dump(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
print "Chi2 init = {0}".format(round(c2i, 4))
MinObj = M(lamChi2, f_1 = 1.0, error_f_1 = 1.0, limit_f_1 = (0.0, None), f_2 = 1.0, error_f_2 = 1.0, limit_f_2 = (0.0, None), f_3 = 1.0, error_f_3 = 1.0, limit_f_3 = (0.0, None), f_4 = 1.0, error_f_4 = 1.0, limit_f_4 = (0.0, None), f_5 = 1.0, error_f_5 = 1.0, limit_f_5 = (0.0, None), f_6 = 1.0, error_f_6 = 1.0, limit_f_6 = (0.0, None), f_7 = 1.0, error_f_7 = 1.0, limit_f_7 = (0.0, None), f_8 = 1.0, error_f_8 = 1.0, limit_f_8 = (0.0, None), f_9 = 1.0, error_f_9 = 1.0, limit_f_9 = (0.0, None), f_10 = 1.0, error_f_10 = 1.0, limit_f_10 = (0.0, None), f_11 = 1.0,error_f_11 = 1.0, limit_f_11 = (0.0, None))
if self.Rate_Model == 'discrete':
c2f, c2stor = lamChi2Dump(MinObj.values['f_1'],MinObj.values['f_2'],MinObj.values['f_3'],MinObj.values['f_4'],MinObj.values['f_5'],MinObj.values['f_6'],MinObj.values['f_7'],MinObj.values['f_8'],MinObj.values['f_9'],MinObj.values['f_10'],MinObj.values['f_11'])
elif self.Rate_Model == 'brokenpowerlawVar':
c2f, c2stor = lamChi2Dump(MinObj.values['k'], MinObj.values['Beta'], MinObj.values['zBreak'])
else:
print "TEST DUMP HERE"
c2f, c2stor = lamChi2Dump(MinObj.values['k'], MinObj.values['Beta'])
#MinObj = M(lamChi2, k = 1.0, fix_k = True, Beta = 0.0, error_Beta = 0.1)
MinObj.set_strategy(2)
fmin, param = MinObj.migrad(nsplit= 10)
#fmin, param = MinObj.migrad()
#ErrDict = MinObj.minos()
self.covar = MinObj.np_covariance()
ErrDict = MinObj.minos(maxcall = 1000)
#plt.scatter(nData, c2stor)
#plt.xlabel('nData')
#plt.ylabel('chi2 in bin')
#plt.savefig(self.realName + 'Chi2VsnData.png')
#plt.clf()
if self.nprint > simInd:
print "Shapes of things"
print len(c2stor)
print nData.shape
print dataBins.shape
print self.binListFit.shape
print self.binListSamp.shape
#print DebugNIaPhot.shape
#print DebugNCCPhot.shape
#print DebugNIaTrue.shape
#print DebugNCCTrue.shape
for c2, nd in zip(c2stor, nData):
self.globalChi2Storage.append(c2)
self.globalNDataStorage.append(nd)
if self.Rate_Model == 'discrete':
fJList = [MinObj.values['f_1'],MinObj.values['f_2'],MinObj.values['f_3'],MinObj.values['f_4'],MinObj.values['f_5'],MinObj.values['f_6'],MinObj.values['f_7'],MinObj.values['f_8'],MinObj.values['f_9'],MinObj.values['f_10'],MinObj.values['f_11']]
fJErrList = [MinObj.errors['f_1'],MinObj.errors['f_2'],MinObj.errors['f_3'],MinObj.errors['f_4'],MinObj.errors['f_5'],MinObj.errors['f_6'],MinObj.errors['f_7'],MinObj.errors['f_8'],MinObj.errors['f_9'],MinObj.errors['f_10'],MinObj.errors['f_11']]
self.fJList = fJList
self.fJErrList = fJErrList
self.Beta = None
self.k = None
self.kErr = None
self.BetaErr = None
print fJList
print fJErrList
else:
k = MinObj.values['k']
#kErr = MinObj.errors['k']
kErr = (np.abs(ErrDict['k']['lower']) + np.abs(ErrDict['k']['upper']))/2.0
Beta = MinObj.values['Beta']
#BetaErr = MinObj.errors['Beta']
BetaErr = (np.abs(ErrDict['Beta']['lower']) + np.abs(ErrDict['Beta']['upper']))/2.0
if self.Rate_Model == 'brokenpowerlawVar':
self.zBreak = MinObj.values['zBreak']
self.zBreakErr = MinObj.errors['zBreak']
self.k = k
self.Beta = Beta
self.kErr = kErr
self.BetaErr = BetaErr
#/(self.nbins - 2)
self.BetaRatio = (1+zCentersFit)**(Beta)
self.fJList = None
print 'SCALE DEBUG'
print NCC
print NIa
print self.BetaRatio
print 'SCALE DEBUG2'
print np.sum(NCC)
print np.sum(NIa)
print np.sum(NIa*self.BetaRatio)
self.fracCCData = (NCC*1.0)/(1.0*(1.0*NCC + NIa*self.BetaRatio))
self.fracCCDataTot = (np.sum(NCC)*1.0)/(1.0*(1.0*np.sum(NCC) + np.sum(NIa*self.BetaRatio)))
print 'SCALE DEBUG3'
print self.fracCCData
print self.fracCCDataTot
print 'SCALE DEBUG4'
print OrigNCC
print np.sum(OrigNCC)
print CCScale
#print self.fracCCDataTot
#print type(self.fracCCDataTot)
#assert(type(self.fracCCDataTot) == float)
print "Chi2 final = {0}".format(round(lamChi2Dump(self.k, self.Beta)[0], 4))
self.chi2 = fmin.fval
print "Chi2final? = {0}".format(round(fmin.fval, 4))
if not(self.priorRate is None):
ratePriorFinalVal = ratePrior(self.k*self.MCK, self.Beta+self.MCBeta, self.priorRate, self.priorZEff, self.ratePriorErrUp, self.ratePriorErrDown, self.ratePriorErrAll )
c2NoPrior = chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, self.k, self.Beta, dump = False, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC)
print "RATE PRIOR FINAL"
print ratePriorFinalVal
print "Chi2final? = {0}".format(round(fmin.fval, 4))
print "Chi2FinalNoPrior"
print c2NoPrior
#fJs = np.ones(zCenters.shape)
'''
try:
if (Rate_Model != 'discrete'):
plt.clf()
MinObj.draw_contour('k','Beta', nsigma=3)
plt.savefig('{0}_{1}_k_beta_contour.png'.format(self.realName, self.simName))
if Blind:
locs, labels = plt.xticks()
labels = locs + np.cos(cosVal)
plt.xticks(labels)
locs, labels = plt.yticks()
labels = locs + np.cos(cosVal)
plt.yticks(labels)
plt.clf()
#xgrid,ygrid, sigma, rawdata = MinObj.mncontour_grid('k', 'Beta', numpoints=30, sigma_res = 1, nsigma = 2.0)
#fig, ax = plt.subplots(1)
#plt.clf()
#CS = ax.contour(xgrid, ygrid + self.MCBeta, sigma, levels = [ 1.0, 2.0])
#ax.clabel(CS, fontsize=7, inline=1)
#ax.set_xlabel('k')
#ax.set_ylabel('Beta')
#if Blind:
# ax.set_xticklabels([])
# ax.set_yticklabels([])
#plt.savefig('{0}_{1}_k_beta_contour.png'.format(self.realName, self.simName))
#plt.close()
except:
print "Plot Fail A"
try:
if (Rate_Model != 'discrete'):
plt.clf()
MinObj.draw_profile('Beta', text = False)
if Blind:
locs, labels = plt.xticks()
labels = locs + np.cos(cosVal)
plt.xticks(labels)
plt.savefig('{0}_{1}_beta_contour.png'.format(self.realName, self.simName))
plt.clf()
except:
print "Plot Fail C"
try:
if Rate_Model != 'discrete':
Betas = np.linspace(self.Beta - 0.5, self.Beta + 0.5, 51)
FCNs = []
for bTemp in Betas:
FCN = lamChi2( self.k, bTemp)
FCNs.append(FCN)
plt.plot(Betas, FCNs, c = 'k', label = 'Non Minuit Contour')
plt.legend()
plt.xlabel('Beta')
plt.ylabel('Chi2')
if Blind:
locs, labels = plt.xticks()
labels = locs + np.cos(cosVal)
plt.xticks(labels)
plt.savefig('{0}_{1}_beta_mycontour.png'.format(self.realName, self.simName))
plt.clf()
except:
print "Plot Fail D"
if Rate_Model != 'discrete':
plt.clf()
ax = plt.axes()
Betas = np.linspace(self.Beta - 0.1, self.Beta + 0.1, 501)
FCNs = []
for bTemp in Betas:
FCN = lamChi2( self.k, bTemp)
FCNs.append(FCN)
plt.plot(Betas, FCNs, c = 'k', label = 'Non Minuit Contour')
plt.legend()
plt.xlabel('Beta')
plt.ylabel('Chi2')
if Blind:
locs, labels = plt.xticks()
labels = locs + np.cos(cosVal)
ax.set_xticklabels(labels)
print "FCNs"
print FCNs
plt.savefig('{0}_{1}_beta_myzoomcontour.png'.format(self.realName, self.simName))
plt.clf()
plt.clf()
ax = plt.axes()
ks = np.linspace(self.k - 0.1, self.k + 0.1, 501)
FCNs = []
for kTemp in ks:
FCN = lamChi2( kTemp,self.Beta)
FCNs.append(FCN)
plt.plot(ks, FCNs, c = 'k', label = 'Non Minuit Contour')
plt.legend()
plt.xlabel('k')
plt.ylabel('Chi2')
print "FCNs"
print FCNs
plt.savefig('{0}_{1}_k_myzoomcontour.png'.format(self.realName, self.simName))
plt.clf()
df = np.array(FCNs[1:]) - np.array(FCNs[:-1])
inds = np.where(df > 0)[0]
print 'inds'
print inds
print inds < 250
print np.where(inds < 250)
inds = inds[np.where(inds < 250)]
print 'inds'
print inds
print "INDSSHAPE"
print inds.shape
if inds.shape[0]:
print "MINUIT IS PROBABLY MAD. HERES WHY"
print inds
print Betas[inds]
if inds.shape[0] > 1:
inds = inds[-1]
print inds
print Betas[inds]
lamChi2Dump(self.k, Betas[inds -3])
print "MINUIT MAD 2"
lamChi2Dump(self.k, Betas[inds -2])
print "MINUIT MAD 3"
lamChi2Dump(self.k, Betas[inds -1])
print "MINUIT MAD 4"
lamChi2Dump(self.k, Betas[inds])
print "MINUIT MAD 5"
lamChi2Dump(self.k, Betas[inds + 1])
print "MINUIT MAD 6"
lamChi2Dump(self.k, Betas[inds + 2])
print "MINUIT MAD 7"
lamChi2Dump(self.k, Betas[inds + 3])
print "END MINUIT MAD"
try:
if (Rate_Model != 'discrete'):
plt.clf()
MinObj.draw_mncontour('k','Beta', nsigma=3)
plt.savefig('{0}_{1}_k_beta_mncontour.png'.format(self.realName, self.simName))
if Blind:
locs, labels = plt.xticks()
labels = locs + np.cos(cosVal)
plt.xticks(labels)
locs, labels = plt.yticks()
labels = locs + np.cos(cosVal)
plt.yticks(labels)
plt.clf()
MinObj.draw_mnprofile('Beta', text = False, subtract_min = True)
if Blind:
locs, labels = plt.xticks()
labels = locs + np.cos(cosVal)
plt.xticks(labels)
plt.savefig('{0}_{1}_beta_mncontour.png'.format(self.realName, self.simName))
plt.clf()
#xgrid,ygrid, sigma, rawdata = MinObj.mncontour_grid('k', 'Beta', numpoints=30, sigma_res = 1, nsigma = 2.0)
#fig, ax = plt.subplots(1)
#plt.clf()
#CS = ax.contour(xgrid, ygrid + self.MCBeta, sigma, levels = [ 1.0, 2.0])
#ax.clabel(CS, fontsize=7, inline=1)
#ax.set_xlabel('k')
#ax.set_ylabel('Beta')
#if Blind:
# ax.set_xticklabels([])
# ax.set_yticklabels([])
#plt.savefig('{0}_{1}_k_beta_contour.png'.format(self.realName, self.simName))
#plt.close()
except:
print "Plot Fail B"
pass
#plt.axhline(y = self.MCBeta, c = 'k', label = 'True Beta')
#plt.axhline(y = Beta + self.MCBeta, c = 'g', label= 'Best Fit Beta')
#plt.axvline(x = k, label = 'Best Fit k')
'''
'''
def chi2V2(self, fJs, fJErrs, zCenters, k, Beta):
fitfJs = k*(1+zCenters)**Beta
Chi2Temp = 0
for fJ, fitfJ, fJErr in zip(fJs, fitfJs, fJErrs):
Chi2Temp += (fJ - fitfJ)**2/(fJ + fJErr)
return Chi2Temp
'''
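# Explanatory note (added): weakPrior is flat (returns 1) inside the allowed
# interval and grows quartically outside it. Worked example: k = 5 with
# KPrior = (0.0, 3.0) gives a penalty of (5 - 3)**4 = 16.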
def weakPrior(value, priorTuple):
if value < priorTuple[1]:
if value > priorTuple[0]:
return 1
else:
return (value - priorTuple[0])**4
else:
return (value - priorTuple[1])**4
def ratePrior(fitK, fitBeta, priorRate, zEffPrior, priorRateErrUp = None, priorRateErrDown = None, priorRateErrAll = None):
print "PRIOR"
print priorRate
print zEffPrior
print priorRateErrUp
print priorRateErrDown
print "Fit Beta/k"
print fitBeta
print fitK
fitRate = fitK*(1+zEffPrior)**fitBeta
print 'Fit Rate'
print fitRate
print "PriorChi2"
if fitRate > priorRate:
if not (priorRateErrUp is None):
print (fitRate - priorRate)**2/priorRateErrUp**2
return (fitRate - priorRate)**2/priorRateErrUp**2
else:
print (fitRate - priorRate)**2/priorRateErrAll**2
return (fitRate - priorRate)**2/priorRateErrAll**2
else:
if not (priorRateErrDown is None):
print (fitRate - priorRate)**2/priorRateErrDown**2
return (fitRate - priorRate)**2/priorRateErrDown**2
else:
print (fitRate - priorRate)**2/priorRateErrAll**2
return (fitRate - priorRate)**2/priorRateErrAll**2
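# Explanatory sketch (added): getCCScale estimates a per-z-bin scale S on the
# simulated CC contamination by matching Hubble-residual outlier fractions.
# With R = (data outliers)/(all data) in the bin, S solves
#   R = (n_outlier_Ia + S*n_outlier_CC) / (N_Ia + S*N_CC)
# which rearranges to
#   S = (R*N_Ia - n_outlier_Ia) / (n_outlier_CC - R*N_CC),
# the expression computed below, with N_Ia Beta-reweighted to the fitted rate.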
def getCCScale(simCat, dataCat, MURESWindow = (-1, 1), zbins = [0.0, 0.3, 0.6, 0.9, 1.2], Beta = None, binList = None, fracCCData = None, outfilePrefix = 'Test', Rate_Model = 'powerlaw', f_Js = None, returnHist = False, debug = False, simInd = 100, ztype = 'zPHOT'):
#import iminuit as iM
#from iminuit import Minuit as M
if debug:
print "Check this"
print Rate_Model
print f_Js
print Beta
print fracCCData
print "Done Checking"
CCScales = []
CCScaleErrs = []
simIaHists = []
simCCHists = []
dataHists = []
if not(f_Js is None):
f_Js = np.array(f_Js)
allSimCC = simCat[simCat['SIM_TYPE_INDEX'].astype(int) != 1]
allSimIa = simCat[simCat['SIM_TYPE_INDEX'].astype(int) == 1]
allData = np.copy(dataCat)
#fnorm2 = float(dataCat.shape[0])/float(np.sum(simHist))
simCat = simCat[(simCat['MURES'] < MURESWindow[0]) | (simCat['MURES'] > MURESWindow[1]) ]
dataCat = dataCat[(dataCat['MURES'] < MURESWindow[0]) | (dataCat['MURES'] > MURESWindow[1]) ]
for zl, zh in zip(zbins[:-1], zbins[1:]):
tempSim = simCat[(simCat[ztype] < zh) & (simCat[ztype] > zl)]
tempData = dataCat[(dataCat[ztype] < zh) & (dataCat[ztype] > zl)]
allSimCCZbin = allSimCC[(allSimCC[ztype] < zh) & (allSimCC[ztype] > zl)]
allSimIaZbin = allSimIa[(allSimIa[ztype] < zh) & (allSimIa[ztype] > zl)]
if debug:
print "all Sim CC Zbin/IaZbin"
print allSimCCZbin.shape[0]
print allSimIaZbin.shape[0]
allDataZbin = allData[(allData[ztype] < zh) & (allData[ztype] > zl)]
tempSimCC = tempSim[tempSim['SIM_TYPE_INDEX'] != 1]
tempSimIa = tempSim[tempSim['SIM_TYPE_INDEX'] == 1]
R = float(tempData.shape[0])/float(allDataZbin.shape[0])
if debug:
print "R"
print R
print "Hist CC, outlier and total"
print tempSim.shape[0]
print allSimCCZbin.shape[0]
print "pre Beta Correction allSimIa"
print tempData.shape[0]
print allSimIaZbin.shape[0]
if Rate_Model == 'discrete':
hist, bins = np.histogram(allSimIaZbin[ztype], bins = 11)
if debug:
print 'fJ shape'
print f_Js.shape
print f_Js
print hist
print bins
betaCorrAllSimIaZbin =np.sum(hist*f_Js)
else:
betaCorrAllSimIaZbin =np.sum((1+ allSimIaZbin[ztype])**Beta)
#S = float(np.array(R*histSAllIa) - np.array(tempSimIa.shape[0]))/float(np.array(tempSimCC.shape[0]) - np.array(R*histSAllCC))
try:
if debug:
print "Test S"
print R
print betaCorrAllSimIaZbin
print tempSimIa.shape[0]
print tempSimCC.shape[0]
print allSimCCZbin.shape
print 'EEE'
print np.array(R*betaCorrAllSimIaZbin)
print 'DDD'
print np.array(tempSimIa.shape[0])
print 'CCC'
print (np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))
print "AAA"
print (np.array(R*betaCorrAllSimIaZbin) - np.array(tempSimIa.shape[0]))/(np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))
print "BBB"
#S = (np.array(R*betaCorrAllSimIaZbin) - np.array(tempSimIa.shape[0]))/(np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))
S = float(np.array(R*betaCorrAllSimIaZbin) - np.array(tempSimIa.shape[0]))/float(np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))
except:
S = np.nan
if debug:
print "S WTF"
print S
print "Uncertainty Related Bullshit"
'''
print "Delta R"
dR = np.sqrt(histD + histDAll)
print dR
num1 = np.sqrt(np.sqrt((dR/R)**2 + histSAllIa) + tempSimIa.shape[0])
num2 = np.sqrt(np.sqrt((dR/R)**2 + histSAllCC) + tempSimCC.shape[0])
den1 = (R*histSAllIa - tempSimIa.shape[0])
den2 = (tempSimCC.shape[0] - R*histSAllCC)
dS = np.sqrt((num1/den1)**2 + (num2/den2)**2)
'''
#ddnCC = np.sqrt(tempSimCC.shape[0])*(tempSimIa.shape[0] - histSAllIa*R)/(tempSimCC.shape[0] - R*histSAllCC)**2
#ddNCC = np.sqrt(histSAllCC)*R*(histSAllIa*R - tempSimIa.shape[0])/(tempSimCC.shape[0] - R*histSAllCC)**2
#ddnIa = np.sqrt(tempSimIa.shape[0])/(tempSimCC.shape[0] - R*histSAllCC)
#ddNIa = np.sqrt(histSAllIa)*R/(tempSimCC.shape[0] - R*histSAllCC)
ddnCC = np.sqrt(tempSimCC.shape[0])*(tempSimIa.shape[0] - allSimIaZbin.shape[0]*R)/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])**2
ddNCC = np.sqrt(allSimCCZbin.shape[0])*R*(allSimIaZbin.shape[0]*R - tempSimIa.shape[0])/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])**2
ddnIa = np.sqrt(tempSimIa.shape[0])/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])
ddNIa = np.sqrt(allSimIaZbin.shape[0])*R/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])
#ddR = (histSAllIa*tempSimCC.shape[0] - histSAllCC * tempSimIa.shape[0])/(tempSimCC.shape[0] - R*histSAllCC)**2
dS = np.sqrt(ddnCC**2 + ddNCC**2 + ddnIa**2 + ddNIa**2)# + ddR**2)
if debug:
print "ddnCC"
print ddnCC
print "ddNCC"
print ddNCC
print "ddnIa"
print ddnIa
print "ddNIa"
print ddNIa
#print "ddR"
#print ddR
print "Delta S"
print dS
#assert(S > 0)
if S < 0:
S = np.nan
if np.isnan(S):
print 'SCALE IS NAN'
if len(CCScales) > 0:
#CCScales.append(CCScales[-1])
CCScales.append(1.0)
else:
CCScales.append(1.0)
else:
CCScales.append(S)
if type(dS) == np.ndarray:
if np.isnan(dS[0]):
CCScaleErrs.append(1.0)
else:
CCScaleErrs.append(dS[0])
else:
if np.isnan(dS):
CCScaleErrs.append(1.0)
else:
CCScaleErrs.append(dS)
#if debug:
# print "CC PlotDebug"
# print (simBinsCC[1:] + simBinsCC[:-1])/2.0
# print simHistCC
# print CCScales[0]
# print dS
# print fnorm2
# print histD
# print (muresBins[1:] + muresBins[:-1])/2.0
#if simInd ==1:
# plt.step((simBinsCC[1:] + simBinsCC[:-1])/2.0, simHistCC*fnorm2, c = 'b', where = 'mid', label = 'prescaled Sim CC')
# plt.step((simBinsCC[1:] + simBinsCC[:-1])/2.0, CCScales[0]*simHistCC*fnorm2, c = 'g', where = 'post', label = 'postscaledSimCC')
# plt.step((muresBins[1:] + muresBins[:-1])/2.0, histD, c = 'r', where = 'mid', label = 'data')
# plt.legend()
# plt.savefig(outfilePrefix + 'ScaledHist.png')
# plt.clf()
if debug:
print "CCScaleErrs"
print CCScaleErrs
if returnHist:
return CCScales, CCScaleErrs, simIaHists, simCCHists, dataHists
return CCScales, CCScaleErrs
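# Explanatory note (added): applyCCScale maps the per-bin CC scales onto the
# analysis binning. A single scale is applied directly; multiple scales are
# interpolated with a UnivariateSpline through the scale-bin centers (weighted
# by 1/err, spline order capped by the number of bins), evaluated at the data
# bin centers, and the scaled counts are clipped at zero.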
def applyCCScale(NCC, CCScales, CCScaleErrs, datazbins = None, zbins = None):
if not(zbins is None):
zbins = np.array(zbins)
if not (datazbins is None):
datazbins = np.array(datazbins)
if type(CCScaleErrs) == list:
CCScaleErrs = np.array(CCScaleErrs)
if type(CCScales) == list:
CCScales = np.array(CCScales)
print 'CCScaleErrs'
print CCScaleErrs
print datazbins
print zbins
if type(CCScales) == np.ndarray:
if CCScales.shape[0] == 1:
NCCScaled = CCScales[0]*NCC
else:
if (datazbins is None) | (zbins is None):
assert(0)
if CCScales.shape[0] < 4:
k = CCScales.shape[0] -1
else:
k = 3
nancond = np.isnan(CCScales)
if np.sum(nancond) > 0:
CCScales[nancond] = 1.
CCScaleErrs[nancond] = 1.
zCenters = (zbins[1:]+ zbins[:-1])/2.0
print zCenters
print CCScales
#spline = UnivariateSpline(zbins, CCScales, w = 1.0/CCScaleErrs, k = k)
spline = UnivariateSpline(zCenters, CCScales, w = 1.0/CCScaleErrs, k = k)
print datazbins.shape
print datazbins
print NCC.shape
datazcents = (datazbins[1:]+ datazbins[:-1])/2.0
NCCScaled = spline(datazcents)*NCC
elif (type(CCScales) == int) | (type(CCScales) == float):
NCCScaled = CCScales*NCC
else:
assert(0)
NCCScaled = NCCScaled.clip(0)
print NCCScaled
assert(not bool(np.sum(NCCScaled < 0)))
return NCCScaled
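# Usage sketch (added; argument order taken from the argv parsing below;
# "rateFitter.py" is a placeholder name for this script):
#   python rateFitter.py <datadir> <simdir> <dataname> <simname> <simgenfile> \
#       <cheatType 0|1> <cheatZ 0|1> <trueBeta> <paramFile> <cutFile> [debug 0|1]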
if __name__ == '__main__':
from sys import argv
print "argv"
print argv
datadir = argv[1]
simdir = argv[2]
dataname = argv[3]
print "dataname"
simname = argv[4]
print simname
simgenfile = argv[5]
print simgenfile
NNCut = False
cheatType = bool(int(argv[6]))
cheatZ = bool(int(argv[7]))
trueBeta = float(argv[8])
paramFile = argv[9]
cutFiles = [argv[10]]
try:
debug = bool(int(argv[11]))
except:
debug = False
#if( ('Combine' in simdir) or ('SALT2' in simdir)) & (('Combine' in datadir) or ('SALT2' in simdir)):
#NNCut = True
#NNProbCut = 0.95
#if len(argv) > 6:
# NNCut = True
# NNProbCut = 0.9
# NNData = argv[6]
# NNSim = argv[7]
#default params
zminFit = 0.1
zmaxFit = 1.2
zminSamp = 0.1
zmaxSamp = 1.2
MJDMin = 0.0
MJDMax = np.inf
bins = "equalSize"
runFit = True
fracContamCuts = [-1]
fixBeta = True
fixK = False
nbins = None
binList = None
ScaleMuResCutLow = -1
ScaleMuResCutHigh = 1
#muresBins = 1
muresBinsLow = 3
muresBinsHigh = 3
scaleZBins = [0.0, 1.2]
nScaleZBins = None
cheatCCSub = False
cheatCCScale = False
ZSysFlag = False
Blind = False
Rate_Model = 'powerlaw'
MURESCuts = 2.0 #[(0.0, 0.8, -0.5, 0.5), (0.8, 1.5, -1, 1)]
noCCMC = False
fixCCScale = False
trueMCBeta = 1.65
trueMCK = 1.97E-5
priorRate = None
priorZEff = None
ratePriorErrUp = None
ratePriorErrDown =None
ratePriorErrAll = None
priors = None
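# Assumed defaults (added): these names are referenced later but were not set
# above; they are normally supplied by the override param file, and the values
# here are placeholder guesses that the exec() below may overwrite.
nIter = 1
TrueCCScale = 1.0
saveCuts = False
nbinsSamp = 10
nbinsFit = 10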
#override file
params = open(paramFile, 'r').readlines()
for p in params:
print p
exec(p)
if nScaleZBins is None :
redoScaleZBinFlag = False
else:
redoScaleZBinFlag = True
if not(priors is None):
if len(priors) == 3:
priorRate, priorZEff, ratePriorErrAll = priors
ratePriorErrUp = None
ratePriorErrDown = None
elif len(priors) == 4:
priorRate, priorZEff, ratePriorErrUp, ratePriorErrDown = priors
ratePriorErrAll =None
cosVal = 47392945716038.134971247
kmean = []
ksigma = []
kErr = []
BetaMean = []
#BetaWeightMean = []
#KWeightMean = []
BetaSigma= []
BetaErr = []
zBreakMeans = []
zBreakSigmas =[]
zBreakErrs = []
Chi2Mean = []
Chi2Sigma = []
f_JStorage = []
f_JErrStorage = []
SampleSizes = []
CCScaleStorageGlobal = []
CCScaleErrStorageGlobal = []
#MURES_Cuts = [2.0]
#MURES_Cuts = [1.0, 1.5, 2.0, 3.0, 4.0, 99.0, 2.0]
#for MURES_Cut in MURES_Cuts:
fcc = -1
for cf in cutFiles:
cuts = [] # cuts = [('FITPROB', 0.01, np.inf), ('NN_PROB_IA', NNProbCut, np.inf)]
cutlist = open(cf, 'r').readlines()
for l in cutlist:
spl = l.split()
cuts.append(('{0}'.format(spl[0]), float('{0}'.format(spl[1])), float('{0}'.format(spl[2]))))
ks = []
kErrs = []
Betas = []
BetaErrs = []
zBreaks =[]
zBreakErrs = []
Chi2s = []
CCScaleStorage = []
CCScaleErrStorage = []
nFail = 0
simLoaded = False
# For the discrete model, an external helper once pre-built the chi2 function:
#if Rate_Model == 'discrete':
# subprocess.call(['python', 'constructChi2Func.py', str(nbins)], shell = False)
if '{' in datadir:
if os.path.exists(datadir.format(98)):
print "MOAR SIMS"
nfile = 101
else:
print "FEWAR SIMS"
nfile = 49
else:
nfile = 2
for simInd in range(1,nfile):
#print "Sim {0}".format(simInd)
#SimBeta = 2.1 # simdir.split('_')[-3]
#SimR0 = 1.7*10**-5 #simdir.split('_')[-5]
#print "Sim R0 = {1}; Sim Beta = {0}".format(SimBeta, SimR0)
print datadir.format(simInd)
if simLoaded:
try:
RateTest.newData(datadir.format(simInd), dataname.format(simInd), simInd =simInd)
if ZSysFlag:
assert(0) # z-systematic path disabled
RateTest.zSystematic(nbins = nbins, binList = binList)
if redoScaleZBinFlag:
RealCat = RateTest.postCutRealCat
RealOutlierCat = RealCat[(RealCat['MURES'] > muresBinsHigh)| (RealCat['MURES'] < -muresBinsLow)] # outliers on both sides; note the sign on the lower cut
zArray =RealOutlierCat[RateTest.ztype]
zArray.sort()
splitZs = np.array_split(zArray, nScaleZBins)
# edges: first z, midpoints between consecutive chunks, last z
scaleZBins = [splitZs[0][0]]
for i in range(1,nScaleZBins):
scaleZBins.append((splitZs[i-1][-1] + splitZs[i][0] )/2.0)
scaleZBins.append(splitZs[i][-1])
#RateTest.effCalc(nbins = nbins, fracContamCut = fcc, simInd =simInd)
#RateTest.effCalc(nbins = 20)
BetaIter = []
BetaErrIter = []
CCIter = []
CCErrIter = []
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, simInd =simInd, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, TrueCCScale = TrueCCScale, scaleZBins = scaleZBins, Blind = Blind)
if Rate_Model != 'discrete':
if Blind:
print "Blinding A"
BetaIter.append(RateTest.Beta+ np.cos(cosVal))
else:
BetaIter.append(RateTest.Beta)
BetaErrIter.append(RateTest.BetaErr)
for iteration in range(nIter):
if not fixCCScale:
if not noCCMC:
CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname,Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)
CCIter.append(CCScale)
CCErrIter.append(CCScaleErr)
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = CCScale, CCScaleErr = CCScaleErr, TrueCCScale = TrueCCScale, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, f_Js =RateTest.fJList, CCZbins = scaleZBins , scaleZBins = scaleZBins, Blind = Blind)
else:
CCIter.append(0.0)
CCErrIter.append(0.0)
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 0.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, f_Js =RateTest.fJList, CCZbins = scaleZBins , scaleZBins = scaleZBins, Blind = Blind)
else:
CCIter.append(1.0)
CCErrIter.append(0.0)
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, f_Js =RateTest.fJList, CCZbins = scaleZBins , scaleZBins = scaleZBins, Blind = Blind)
if Blind:
print "Blinding b"
BetaIter.append(RateTest.Beta+ np.cos(cosVal))
else:
BetaIter.append(RateTest.Beta)
BetaErrIter.append(RateTest.BetaErr)
if not fixCCScale:
if not noCCMC:
CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname,Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)
CCIter.append(CCScale)
CCErrIter.append(CCScaleErr)
else:
CCIter.append(1.0)
CCErrIter.append(0.0)
print "CCScale Progression"
print CCIter
print "CCScale Err Progression"
print CCErrIter
if Rate_Model != 'discrete':
print "Beta Progression"
print BetaIter
print "Beta Err Progressions"
print BetaErrIter
print "Mean Betas"
print np.nanmean(BetaIter)
print "Mean CCScales"
print np.nanmean(CCIter)
else:
f_JStorage.append(RateTest.fJList)
f_JErrStorage.append(RateTest.fJErrList)
#print "AAA CC Scales"
if not fixCCScale:
if not noCCMC:
CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)
print CCScale
CCScaleStorage.append(CCScale)
CCScaleErrStorage.append(CCScaleErr)
else:
CCScaleStorage.append(0.0)
CCScaleErrStorage.append(1.0)
else:
CCScaleStorage.append(1.0)
CCScaleErrStorage.append(1.0)
ks.append(RateTest.k)
kErrs.append(RateTest.kErr)
if Blind:
print "Blinding c"
Betas.append(RateTest.Beta+ np.cos(cosVal))
else:
Betas.append(RateTest.Beta)
BetaErrs.append(RateTest.BetaErr)
if Rate_Model == 'brokenpowerlawVar':
zBreaks.append(RateTest.zBreak)
zBreakErrs.append(RateTest.zBreakErr)
Chi2s.append(RateTest.chi2)
print "CCScale Storage Iter {0}".format(simInd)
print CCScaleStorage
if not noCCMC:
print CCScale
print CCScale[0]
dnamestr = datadir.format(simInd)
cutdnamestr = dnamestr.split('.')[0] + '+CUTS.FITRES.gz'
#if saveCuts:
# np.savetxt(cutdnamestr, RateTest.realcat.Catalog, delimiter = ' ', fmt='%s')
lowzCut = zminFit
highzCut = zmaxFit
SampleSizes.append( RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)].shape[0])
if saveCuts:
np.savetxt(cutdnamestr, RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)], delimiter = ' ', fmt='%s')
#with open(cutdnamestr, 'rb') as f_in:
# with gzip.open(cutdnamestr + '.gz', 'wb') as f_out:
# shutil.copyfileobj(f_in, f_out)
except Exception, e:
print "FAILURE"
print e
traceback.print_exc()
nFail +=1
else:
try:
RateTest = Rate_Fitter(datadir.format(simInd), dataname.format(simInd), simdir, simname,simgenfile, trueMCBeta, trueMCK, zminSamp =zminSamp, zmaxSamp =zmaxSamp, zminFit =zminFit, zmaxFit =zmaxFit, cheatZ = cheatZ, cheatType = cheatType, cuts = cuts, cheatCCSub = cheatCCSub, cheatCCScale = cheatCCScale, Rate_Model = Rate_Model, MURESCuts = MURESCuts, noCCMC = noCCMC, priorRate = priorRate, priorZEff = priorZEff, ratePriorErrUp = ratePriorErrUp, ratePriorErrDown =ratePriorErrDown, ratePriorErrAll = ratePriorErrAll)# , MJDMin = 0, MJDMax = np.inf)
if ZSysFlag:
RateTest.zSystematic(nbins = nbins, binList = binList)
simLoaded = True
RateTest.effCalc(nbinsSamp = nbinsSamp,nbinsFit = nbinsFit, fracContamCut = fcc)
#RateTest.effCalc(nbins = 20)
BetaIter = []
BetaErrIter = []
CCIter = []
CCErrIter = []
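# Optionally rebuild the CC-scale redshift bins from the real catalog's
# Hubble-residual outliers so each bin holds roughly equal numbers of outliers.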
if redoScaleZBinFlag:
RealCat = RateTest.postCutRealCat
RealOutlierCat = RealCat[(RealCat['MURES'] > muresBinsHigh)| (RealCat['MURES'] < muresBinsLow)]
zArray =RealOutlierCat[RateTest.ztype]
zArray.sort()
print 'zArray'
print zArray
print 'nScaleZBins'
print nScaleZBins
splitZs = np.array_split(zArray, nScaleZBins)
# Bin edges: first outlier z, then the midpoints between adjacent equal-occupancy chunks, then the last outlier z.
scaleZBins = [splitZs[0][0]]
for i in range(1,nScaleZBins):
scaleZBins.append((splitZs[i-1][-1] + splitZs[i][0] )/2.0)
scaleZBins.append(splitZs[i][-1])
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, simInd =simInd, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, TrueCCScale = TrueCCScale, scaleZBins = scaleZBins, Blind = Blind)
if Rate_Model != 'discrete':
if Blind:
print "Blinding d"
BetaIter.append(RateTest.Beta+ np.cos(cosVal))
else:
BetaIter.append(RateTest.Beta)
BetaErrIter.append(RateTest.BetaErr)
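# Alternate between re-estimating the CC contamination scale and refitting the
# rate parameters; nIter controls how many scale/fit rounds are performed.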
for iteration in range(nIter):
print "interation Number"
print iteration
if not fixCCScale:
if not noCCMC:
CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)
CCIter.append(CCScale)
CCErrIter.append(CCScaleErr)
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = CCScale, CCScaleErr = CCScaleErr, TrueCCScale = TrueCCScale, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, CCZbins = scaleZBins, scaleZBins = scaleZBins, Blind = Blind)
else:
CCIter.append(0.0)
CCErrIter.append(1.0)
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 0.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, CCZbins = scaleZBins, scaleZBins = scaleZBins, Blind = Blind)
else:
CCIter.append(1.0)
CCErrIter.append(1.0)
RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, CCZbins = scaleZBins, scaleZBins = scaleZBins, Blind = Blind)
if Rate_Model != 'discrete':
if Blind:
print "Blinding e"
BetaIter.append(RateTest.Beta+ np.cos(cosVal))
else:
BetaIter.append(RateTest.Beta)
BetaErrIter.append(RateTest.BetaErr)
if not fixCCScale:
if not noCCMC:
CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)
CCIter.append(CCScale)
CCErrIter.append(CCScaleErr)
if Rate_Model != 'discrete':
print "Beta Progression"
print BetaIter
print "Beta Err Progressions"
print BetaErrIter
print "Mean Betas"
print np.nanmean(BetaIter)
else:
f_JStorage.append(RateTest.fJList)
f_JErrStorage.append(RateTest.fJErrList)
print "CCScale Progression"
print CCIter
print "CCScale Err Progression"
print CCErrIter
print "Mean CCScales"
print np.nanmean(CCIter)
if not fixCCScale:
if not noCCMC:
print "AAA CC Scales"
CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, f_Js =RateTest.fJList, Rate_Model = Rate_Model, simInd = simInd, debug = debug, ztype = RateTest.ztype)
print 'CC Scale'
print CCScale
CCScaleStorage.append(CCScale)
CCScaleErrStorage.append(CCScaleErr)
else:
CCScaleStorage.append(0.0)
CCScaleErrStorage.append(1.0)
else:
CCScaleStorage.append(1.0)
CCScaleErrStorage.append(1.0)
dnamestr = datadir.format(simInd)
cutdnamestr = dnamestr.split('.')[0] + '+CUTS.FITRES.gz'
np.savetxt(cutdnamestr, RateTest.realcat.Catalog, delimiter = ' ', fmt='%s')
#with open(cutdnamestr, 'rb') as f_in:
# with gzip.open(cutdnamestr + '.gz', 'wb') as f_out:
# shutil.copyfileobj(f_in, f_out)
cutsnamestr = simname.split('.')[0] + '+CUTS.FITRES.gz'
np.savetxt(cutsnamestr, RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)], delimiter = ' ', fmt = '%s')
lowzCut = zminFit
highzCut = zmaxFit
SampleSizes.append( RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)].shape[0])
#with open(cutsnamestr, 'rb') as f_in:
# with gzip.open(cutsnamestr + '.gz', 'wb') as f_out:
# shutil.copyfileobj(f_in, f_out)
ks.append(RateTest.k)
kErrs.append(RateTest.kErr)
if Rate_Model != 'discrete':
if Blind:
print "Blinding f"
Betas.append(RateTest.Beta+ np.cos(cosVal))
else:
Betas.append(RateTest.Beta)
BetaErrs.append(RateTest.BetaErr)
if Rate_Model == 'brokenpowerlawVar':
zBreaks.append(Rate_Fitter.zBreak)
zBreakErrs.append(Rate_Fitter.zBreakErr)
Chi2s.append(RateTest.chi2)
print "CCScale Storage Iter {0}".format(simInd)
print CCScaleStorage
if not noCCMC:
print CCScale
print CCScale[0]
if Rate_Model != 'discrete':
if np.isnan(RateTest.Beta):
nFail +=1
except Exception, e:
print "FAILURE"
print e
traceback.print_exc()
nFail +=1
#if Blind:
# Betas = np.array(Betas) + np.cos(47392945716038.134971247)
print "Number of Failures"
print nFail
if Rate_Model != 'discrete':
# Mask out failed fits: non-finite best-fit values or non-positive errors.
# (Cast the error lists to arrays so the > 0 comparisons are elementwise.)
badSims = np.invert(np.isfinite(Betas) & (np.array(BetaErrs) > 0) & np.isfinite(ks) & (np.array(kErrs) > 0))
mBetas = ma.masked_array(Betas, mask=badSims)
mBetaErrs = ma.masked_array(BetaErrs, mask=badSims)
mks = ma.masked_array(ks, mask=badSims)
mkErrs = ma.masked_array(kErrs, mask=badSims)
print "mean k"
print np.nanmean(ks)
print "mean kerrs"
print np.nanmean(kErrs)
print "std. k"
print np.nanstd(ks)
print "Mean beta"
print np.nanmean(Betas)
print "Mean betaerrs"
print np.nanmean(BetaErrs)
print "std. beta"
print np.nanstd(Betas)
if len(Betas) == 1:
kmean.append(ks[0])
ksigma.append(0.0)
kErr.append(kErrs[0])
BetaMean.append(Betas[0])
BetaSigma.append(0.0)
BetaErr.append(BetaErrs[0])
else:
print "test here"
print ks
print mks
print Betas
print mBetas
print 'end test here'
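# Combine the per-sim fits with inverse-variance weights:
#   <x> = sum_i(x_i / err_i^2) / sum_i(1 / err_i^2),
# using the masked arrays so failed sims are excluded; sigma is the raw scatter.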
kmean.append(np.average(mks, weights = 1.0/mkErrs**2))
ksigma.append(np.std(mks))
kErr.append(np.mean(mkErrs))
BetaMean.append(np.average(mBetas, weights = 1.0/mBetaErrs**2))
#BetaWeightMean.append(np.average(Betas, weights = 1.0/ma.masked_invalid(BetaErrs)**2))
#KWeightMean.append(np.average(ks, weights = 1.0/ma.masked_invalid(kErrs)**2))
BetaSigma.append(np.std(mBetas))
BetaErr.append(np.mean(mBetaErrs))
else:
print "mean f_Js"
print np.nanmean(f_JStorage, axis =0)
print "mean f_JErrs"
print np.nanmean(f_JErrStorage, axis =0)
if Rate_Model == 'brokenpowerlawVar':
zBreakMeans.append(np.nanmean(zBreaks))
zBreakSigmas.append(np.nanstd(zBreaks))
Chi2Mean.append(np.nanmean(Chi2s))
Chi2Sigma.append(np.nanstd(Chi2s))
#if simInd == 1:
print "Indiv Chi2s"
print Chi2s
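# Histogram the per-sim minimum chi2 values and overlay a chi2 PDF with
# nbinsFit - 2 degrees of freedom (two fitted parameters), scaled to the histogram peak.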
bins0 = np.linspace(1.0, 20.0, 10)
hist, bins = np.histogram(Chi2s, bins = bins0)
xs = (bins[1:] + bins[:-1])/2.0
plt.bar(xs, hist, width = bins[1:] - bins[:-1])
print "Chi2 Hist"
print bins
print hist
chi2s = scipy.stats.chi2.pdf(xs, nbinsFit - 2)
norm = np.max(hist)*1.0/np.max(chi2s)
plt.plot(xs, chi2s*norm, color = 'g')
if cheatType and not cheatZ:
plt.savefig(dataname +'Chi2Plot_CheatType.png')
elif cheatZ and not cheatType:
plt.savefig(dataname +'Chi2Plot_CheatZ.png')
elif cheatZ and cheatType:
plt.savefig(dataname +'Chi2Plot_CheatTypeZ.png')
else:
plt.savefig(dataname +'Chi2Plot.png')
if not noCCMC:
print "AAA CC Scale means (weighted, unweighted)"
#print np.average(ma.masked_invalid(np.array(CCScaleStorage)),weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2, axis = 0)
#print np.nanmean(ma.masked_invalid(np.array(CCScaleStorage)), axis = 0)
#print CCScaleStorage
#print CCScaleErrStorage
print np.average(np.array(CCScaleStorage),weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2, axis = 0)
print np.nanmean(np.array(CCScaleStorage), axis = 0)
print "AAA CC Scale stds"
print np.nanstd(np.array(CCScaleStorage), axis = 0)
CCScaleStorageGlobal.append(CCScaleStorage)
print "All Betas"
print Betas
if cheatType:
print "THESE RESULTS ONLY INCLUDE TRUE Ias BECAUSE WE CHEATED AND USED THE SIM INFORMATION"
if cheatZ:
print "THESE RESULTS Use Simulated Redshift info"
'''
print "lengths of lists"
print len(RateTest.globalNDataStorage)
print len(RateTest.globalChi2Storage)
print len(RateTest.globalZPhotBinStorage)
print len(RateTest.globalNDataIaPhotBinStorage)
plt.clf()
plt.scatter(RateTest.globalNDataStorage, RateTest.globalChi2Storage)
plt.xlabel('nData')
plt.ylabel('chi2 in bin')
string = ''
if cheatType: string += 'CheatType'
if cheatZ: string += 'CheatZ'
print 'string here'
print string
plt.savefig(RateTest.realName + 'Chi2VsnData' + string +'.png')
plt.clf()
plt.scatter(RateTest.globalZPhotBinStorage, RateTest.globalChi2Storage)
plt.xlabel('zPhot bin center')
plt.ylabel('chi2 in bin')
plt.savefig(RateTest.realName + 'Chi2VsZPhot' + string +'.png')
plt.clf()
plt.clf()
plt.scatter(RateTest.globalZPhotBinStorage, RateTest.globalNDataIaPhotBinStorage, s = 1, c = 'r', label = 'Type Ia Data, zPhot')
plt.scatter(RateTest.globalZPhotBinStorage, RateTest.globalNDataCCPhotBinStorage, s = 1, c = 'b', label = 'CC Data, zPhot')
plt.scatter(RateTest.globalZTrueBinStorage, RateTest.globalNDataIaTrueBinStorage, s = 1, c = 'Pink', label = 'Type Ia Data, zTrue')
plt.scatter(RateTest.globalZTrueBinStorage, RateTest.globalNDataCCTrueBinStorage, s = 1, c = 'Cyan', label = 'CC Data, zTrue')
plt.yscale('log')
plt.xlabel('redshift either true or phot')
plt.legend()
plt.savefig(RateTest.realName + 'AggregateZDistro' + string +'.png')
'''
#print "MURES CUTS"
#print MURES_Cuts
print "Frac Contam Cuts"
print fracContamCuts
if Rate_Model != 'discrete':
print "Kmeans"
print kmean
print "Ksigmas"
print ksigma
print "BetaMeans"
print BetaMean
print "BetaSigmas"
print BetaSigma
print "BetaErrs"
print BetaErr
else:
print "f_J mean unweighted"
print np.mean(f_JStorage, axis = 0)
print "f_J mean weighted"
print np.average(f_JStorage, weights = 1.0/(np.array(f_JErrStorage))**2, axis = 0)
print "f_J Errors"
print np.mean(f_JErrStorage, axis = 0)
if Rate_Model == 'brokenpowerlawVar':
print "mean powerlaw break z"
print zBreakMeans
print "st. dev powerlaw break z"
print zBreakSigmas
print "Chi2Means"
print Chi2Mean
print "Chi2Sigma"
print Chi2Sigma
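# Sanity check: the bookkeeping below assumes the frac-contamination cut was
# left at its disabled default of -1.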
assert(fracContamCuts[0] == -1)
outfile = dataname
if Rate_Model != 'discrete':
print "outfile Pre Prefix"
print outfile
if cheatType:
outfile = outfile + '_CheatType'
if cheatZ:
outfile = outfile + 'Z'
elif cheatZ:
outfile = outfile + '_CheatZ'
outfile1 = outfile + '.txt'
outfile2 = outfile + '-IndivBetaK.txt'
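# Write one row per simulation with its individual best-fit Beta, k, and errors,
# alongside the aggregate summary file created below.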
output2 = open(outfile2, 'w')
output2.write('i Beta_i k_i BetaErr_i kErr_i\n')
for i, b, k, berr, kerr in zip(range(len(Betas)),Betas, ks, BetaErrs, kErrs):
output2.write('{0} {1:.4f} {2:.4f} {3:.4f} {4:.4f}\n'.format(i, b, k, berr, kerr))
output2.close()
print "Outfile Name"
if not(os.path.isfile(outfile1)):
output = open(outfile1, 'w')
output.write('#Date Date/time at which job finished\n')
output.write('#DataBeta Input beta for the simulated data sample. Will be 0.0 for real data.\n')
output.write('#N_sims Number of datalike sims that go into the subsequent means\n')
output.write('#SampleSize Mean Number of Events in data post cut\n')
output.write('#delta_Beta mean difference between large MC sim beta (2.11 for the time being) and the measured beta for the data (not the beta in column 2).\n')
output.write('#sigma_Beta stdev of delta_Beta over N_sims sims\n')
output.write('#BetaStdErr std. error in the mean of delta_Beta over N_sims sims\n')
output.write('#Beta_err mean statistical error on beta\n')
output.write('#K mean ratio between large MC sim K (1.7E-5 for the time being) and the measured K for the data \n')
output.write('#sigma_K stdev of K over N_sims sims\n')
output.write('#KStdErr std. error in the mean of K over N_sims sims\n')
output.write('#KStaterr mean statistical error on K\n')
output.write('#meanZ mean photoZ of the large MC sim\n')
output.write('#sigmaZ std. deviation of the photoZs for the large Sim\n')
output.write('#sigmaDZ std. deviation of (zSim - zPHOT)\n')
output.write('#NCC/NTotScaled overall CC Contamination after adjusting CC Frac to data\n')
output.write('#NCC/NTot overall CC Contamination in sim only\n')
output.write('#CCScales relative sim vs. CC rate in z-bins \n')
output.write('#TypeChoice Internal Diagnostic, check code comments\n')
output.write('#NNProbCut Threshold for NN probability of Ia\n')
output.write('#NBins Number of Analysis Bins\n')
output.write('#MRSLow Threshold for Neg Mures Outliers\n')
output.write('#MRSHigh Threshold for Pos Mures Outliers\n')
output.write('#FitprobCut Lowest Fitprob in sim\n')
output.write('#MRSCut NSigma Hubble residual cut\n')
output.write('#Chi2 minimum value of Chi2 function\n')
output.write('#Correlation cov[0,1]/np.sqrt(cov[0,0]*cov[1,1])\n')
output.write('#Date \t\tDataBeta N_sims SampleSize delta_Beta sigma_Beta BetaStdErr BetaStatErr K sigma_K KStdErr KStatErr meanZ sigmaZ sigmaDz NCC/NTotScaled NCC/NTot CCScales TypeChoice NNProbCut NBins MRSLow MRSHigh FitprobCut MRSCut Chi2 Correlation\n')
else:
output = open(outfile1, 'a')
print 'outfile'
print outfile
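# Assemble the summary row: time stamp, count of successful sims, weighted means
# and scatters, redshift moments of the sim, and contamination fractions.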
cat = RateTest.simcat.Catalog
t = time.strftime('%b-%d-%H:%M')
N_Sims = np.sum(np.invert(np.isnan(ks)))
SigBeta = float(BetaSigma[0])
SigK = float(ksigma[0])
kStdErr = float(ksigma[0])/np.sqrt(N_Sims)
BetaStdErr = float(BetaSigma[0])/np.sqrt(N_Sims)
meanZ = np.nanmean(cat[RateTest.ztype])
sigZ = np.nanstd(cat[RateTest.ztype])
sigDZ = np.nanstd(cat[RateTest.ztype] - cat['SIM_ZCMB'])
lowzCut = zminFit
highzCut = zmaxFit
contam2 = np.sum(cat[(cat[RateTest.ztype] > lowzCut) & (cat[RateTest.ztype] < highzCut)]['SIM_TYPE_INDEX'] !=1).astype(float)/ float(cat[(cat[RateTest.ztype] > lowzCut) & (cat[RateTest.ztype] < highzCut)].shape[0])
contam = RateTest.fracCCDataTot
ccscales = np.average(np.array(CCScaleStorage),weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2, axis = 0)
cov = RateTest.covar
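# Pearson correlation between the two fitted parameters (k, Beta), taken from
# the Minuit covariance matrix.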
correlation = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
print "Outfile debug"
print t
print trueBeta
print N_Sims
print BetaMean[0]
print BetaStdErr
print BetaErrs[0]
print meanZ
print sigZ
print sigDZ
print contam
print RateTest.typeString
print RateTest.postCutSimCat['NN_PROB_IA'].min()
print SigBeta
print kmean[0]
print kErrs[0]
print kStdErr
print SigK
print np.nanmean(SampleSizes)
print int(nbinsFit)
print ScaleMuResCutLow
print ScaleMuResCutHigh
print RateTest.postCutSimCat['FITPROB'].min()
print MURESCuts
print np.mean(Chi2Mean)
print contam2
print ccscales
print correlation
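# Collapse the CC-scale array into a single comma-separated token so the
# whitespace-delimited summary row stays parseable.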
ccscales = ','.join(str(ccscales).split())
output.write('{0}\t\t{1:.2f}\t{2}\t{17:.3f}\t{3:.3f}\t{12:.3f}\t{4:.3f}\t{5:.3f}\t{13:.3f}\t{14:.3f}\t{15:.3f}\t{16:.3f}\t{6:.3f}\t{7:.3f}\t{8:.3f}\t{9:.3f}\t{24:.3f}\t{25}\t{10}\t{11:.3f}\t{18:d}\t{19:.3f}\t{20:.3f}\t{21:.3f}\t{22:.2f}\t{23:.3f}\t{26:.3f}\n'.format(t, trueBeta, N_Sims, BetaMean[0], BetaStdErr, BetaErrs[0],meanZ, sigZ, sigDZ, contam, RateTest.typeString, RateTest.postCutSimCat['NN_PROB_IA'].min(), SigBeta, kmean[0], kErrs[0], kStdErr, SigK, np.nanmean(SampleSizes), int(nbinsFit), ScaleMuResCutLow, ScaleMuResCutHigh, RateTest.postCutSimCat['FITPROB'].min(), MURESCuts, np.mean(Chi2Mean), contam2, ccscales, correlation) )
print "BetaMean[0]"
print BetaMean[0]
print BetaMean
print "KMean[0]"
print kmean[0]
print kmean
print "Correlation"
print correlation
#print "BetaWeightMean[0]"
#print BetaWeightMean[0]
#print BetaWeightMean
#print "KWeightMean[0]"
#print KWeightMean[0]
#print KWeightMean
if not noCCMC:
print "Individual Scales"
print CCScaleStorage
print "Individual ScaleErrs"
print CCScaleErrStorage
print "average ScaleErrs"
print np.nanmean(CCScaleErrStorage)
print "AAA CC Scale means (weighted, unweighted)2"
print np.average(ma.masked_invalid(np.array(CCScaleStorage)), weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2)
print np.nanmean(ma.masked_invalid(np.array(CCScaleStorage)))
print "AAA CC Scale stds"
print np.nanstd(np.array(CCScaleStorage))
if simInd == 1:
plt.clf()
hist, bins = np.histogram(CCScaleStorage, bins = np.linspace(0.0, 5.0, 10))
plt.step((bins[1:]+bins[:-1])/2.0, hist, where = 'mid', c = 'g')
plt.savefig(dataname + 'ScaleDistro.png')
plt.clf()
print "nIter"
print nIter
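# If an external rate prior was supplied, scan the prior as a function of k and
# beta around the best-fit values and save diagnostic plots.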
if not (priorRate is None):
kPriorPlots = np.linspace(0.8, 1.5, 300)
kPriors = []
for ktemp in kPriorPlots:
kPriors.append(ratePrior(ktemp*trueMCK, BetaMean[0]*trueMCBeta, priorRate, priorZEff, priorRateErrUp = ratePriorErrUp, priorRateErrDown = ratePriorErrDown, priorRateErrAll = ratePriorErrAll))
betaPriorPlots = np.linspace(-0.5, 0.5, 300)
betaPriors = []
for btemp in betaPriorPlots:
betaPriors.append(ratePrior(kmean[0]*trueMCK, btemp*trueMCBeta, priorRate, priorZEff, priorRateErrUp = ratePriorErrUp, priorRateErrDown = ratePriorErrDown, priorRateErrAll = ratePriorErrAll))
actualPrior = ratePrior(kmean[0]*trueMCK, BetaMean[0]*trueMCBeta, priorRate, priorZEff, priorRateErrUp = ratePriorErrUp, priorRateErrDown = ratePriorErrDown, priorRateErrAll = ratePriorErrAll)
kPriors = np.array(kPriors)
betaPriors = np.array(betaPriors)
plt.clf()
plt.figure()
plt.plot(kPriorPlots, np.log10(kPriors) )
plt.hlines(np.log10(actualPrior), kPriorPlots[0], kPriorPlots[-1], label = 'Best Fit Prior = {0:.03f}'.format(actualPrior))
plt.vlines(kmean[0], np.log10(kPriors).min(), np.log10(kPriors).max(), label = 'Best Fit K = {0:.03f}'.format(kmean[0]))
plt.xlabel('k')
plt.ylabel('ratePrior')
plt.legend()
plt.savefig(dataname + '_LogKPriorPlot.png')
plt.clf()
plt.figure()
plt.plot(kPriorPlots, kPriors)
plt.hlines(actualPrior, kPriorPlots[0], kPriorPlots[-1], label = 'Best Fit Prior = {0:.03f}'.format(actualPrior))
plt.vlines(kmean[0], kPriors.min(), kPriors.max(), label = 'Best Fit K = {0:.03f}'.format(kmean[0]))
plt.xlabel('k')
plt.ylabel('ratePrior')
plt.legend()
plt.savefig(dataname + '_KPriorPlot.png')
plt.clf()
plt.figure()
plt.plot(betaPriorPlots, betaPriors)
plt.hlines(actualPrior, betaPriorPlots[0], betaPriorPlots[-1], label = 'Best Fit Prior = {0:.03f}'.format(actualPrior))
plt.vlines(BetaMean[0], betaPriors.min(), betaPriors.max(), label = 'Best Fit Beta = {0:.03f}'.format(BetaMean[0]))
plt.xlabel('beta')
plt.ylabel('ratePrior')
plt.legend()
plt.savefig(dataname + '_BetaPriorPlot.png')
'''
argList = ''
minObjList = ''
chi2Initargs = ''
for i in xrange(zCenters.shape[0]):
argList += 'f{0},'.format(i)
minObjList += 'f{0} = 1.0, error_f{0} = 0.1, limit_f{0} = (0.0, None),'.format(i)
chi2Initargs += '1.0,'
argList = argList[:-1]
minObjList = minObjList[:-1]
chi2Initargs = chi2Initargs[:-1]
#print argList
#print minObjList
#print chi2Initargs
exec('''
'''
def chi2func(nData, nSim, effmat, fnorm, zCenters, {0}, dump = False, complexdump = False):
Chi2Temp = 0.0
f_Js = [{0}]
chi2Mat = np.zeros((self.nbins))
adjNMC = np.zeros((self.nbins))
#print f_Js
#Check if I am scaling errors down with increasing MC size. Make MC twice as large as "Data" to test.
for row, nDataI, i in zip(effmat, nData, xrange(self.nbins)):
#if dump:
# print "nDataI"
# print nDataI
JSumTemp = 0.0
for eff, nSimJ, f_J, j in zip(row, nSim, f_Js, xrange(self.nbins)):
JSumTemp += nSimJ*f_J*eff*fnorm
if dump and i == j:
print "nDataI"
print nDataI
print "Bin Contribution to scaled nSim"
print nSimJ*f_J*eff*fnorm
#print "Product of nSimJ, f_J, eff, fnorm"
#print nSimJ
#print f_J
#print eff
#print fnorm
if nDataI > 1E-11 or JSumTemp > 1E-11:
if dump and i == j:
print "nDataI"
print nDataI
print "scaled nSim"
print JSumTemp
print "fnorm"
print fnorm
print "error"
print nDataI + JSumTemp*fnorm
if (nDataI + JSumTemp*fnorm) <= 0:
print (nDataI + JSumTemp*fnorm)
assert(0)
Chi2Temp += ((nDataI - JSumTemp)**2/(nDataI + JSumTemp*fnorm))#*fnorm**2
return Chi2Temp
''''''.format(argList), locals())
fnorm = float(np.sum(nData))/float(self.simcat.Catalog['zPHOT'].shape[0])
#print type(chi2func)
#print 'lamChi2 = lambda {0}: chi2func(nData, nSim, self.effmat, fnorm, zCenters, {0})'.format(argList)
exec('lamChi2 = lambda {0}: chi2func(nData, nSim, self.effmat, fnorm, zCenters, {0})'.format(argList),locals())
exec('lamChi2Dump = lambda {0}: chi2func(nData, nSim, self.effmat, fnorm, zCenters, {0}, dump = True)'.format(argList),locals())
#print type(lamChi2)
#print type(lamChi2Dump)
#print 'MinObj = M(lamChi2, {0})'.format(minObjList)
exec('MinObj = M(lamChi2, {0})'.format(minObjList),locals())
exec('chi2Init = lamChi2Dump({0})'.format(chi2Initargs),locals())
#print "Chi2 init = {0}".format(round(chi2Init, 4))
MinObj.set_strategy(2)
MinObj.migrad()
#MinObj.minos()
zCenters = (simBins[1:] + simBins[:-1])/2.0
print MinObj.values
fJs = []
fJErrs = []
for v in MinObj.values.keys():
fJs.append(MinObj.values[v])
fJErrs.append(MinObj.errors[v])
exec('lamChi22 = lambda k, Beta: self.chi2V2(fJs, fJErrs, zCenters, k, Beta)',locals())
exec('MinObj2 = M(lamChi22, k = 1.0, error_k = 0.1, limit_k = (0.0, None), Beta = 0.0, error_Beta = 0.1)',locals())
#print "Large Perfect Sim {0}".format(simInd)
#print "Sim R0 = 1.7E-5; Sim Beta = 4.2"
##print "Sim Beta = 1.5; Data Beta = 1.5"
##RateTest = Rate_Fitter('DES_FULLSURVEY_TEST/JLDESFULLSURVEYIaOnly+zPHOT+smearC11/FITOPT000+SALT2mu.FITRES', 'JLDESFULLSURVEYIaOnly+zPHOT+smearC11','JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow/FITOPT000+SALT2mu.FITRES', 'JLDES_R0_7E-5_Beta_1-5_Shallow','/project/rkessler/SN/SNDATA_ROOT/SIM/JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow.DUMP')
#print '/project/rkessler/jlasker/Rate_Analysis/TestSameK2Beta/outFit_datasize/JLDES_R0_1-7E-5_Beta_4-2_Datasize_Perfect-00{0:02d}/FITOPT000.FITRES'.format(simInd)
#RateTest = Rate_Fitter('/project/rkessler/jlasker/Rate_Analysis/TestSameK2Beta/outFit_datasize/JLDES_R0_1-7E-5_Beta_4-2_Datasize_Perfect-00{0:02d}/FITOPT000.FITRES'.format(simInd), 'TestSameK2Beta/JLDES_R0_1-7E-5_Beta_4-2-00{0:02d}'.format(simInd),'/project/rkessler/jlasker/Rate_Analysis/outFit_datalike/JLDES_R0_1-7E-5_Beta_2-1_Datalike_PERFECT/FITOPT000.FITRES', 'JLDES_R0_1-7E-5_Beta_2-1_DataLikePhotZ','/scratch/midway2/rkessler/SNDATA_ROOT/SIM/JLDES_R0_1-7E-5_Beta_2-1_Datalike_PERFECT/JLDES_R0_1-7E-5_Beta_2-1_Datalike_PERFECT.DUMP', 2.1, zmin = 0.1, zmax = 1.3)# , MJDMin = 0, MJDMax = np.inf)
#RateTest.effCalc(nbins = 12)
##RateTest.effCalc(nbins = 20)
#RateTest.fit_rate()
#ksPerf.append(RateTest.k)
#kErrsPerf.append(RateTest.kErr)
#BetasPerf.append(RateTest.Beta)
#BetaErrsPerf.append(RateTest.BetaErr)
#print "Sim Beta = 1.5; Data Beta = 1.5"
#RateTest = Rate_Fitter('DES_FULLSURVEY_TEST/JLDESFULLSURVEYIaOnly+zPHOT+smearC11/FITOPT000+SALT2mu.FITRES', 'JLDESFULLSURVEYIaOnly+zPHOT+smearC11','JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow/FITOPT000+SALT2mu.FITRES', 'JLDES_R0_7E-5_Beta_1-5_Shallow','/project/rkessler/SN/SNDATA_ROOT/SIM/JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow.DUMP')
try:
optfname = argv[1]
opts = open(optfname, 'r')
optlist = opts.readlines()
zmin = None; zmax = None; MJDMin = None; MJDMax = None; bins = None; runFit = None
for opt in optlist:
try:
optName, optVal = opt.split()
except:
print "{0} not formatted correctly".format(opt)
continue
if (optName.lower() == 'zmin') & (not zmin): zmin = optVal
if (optName.lower() == 'zmax') & (not zmax): zmax = optVal
if (optName.lower() == 'mjdmin') & (not MJDMin): MJDMin = optVal
if (optName.lower() == 'mjdmax') & (not MJDMax): MJDMax = optVal
if (optName.lower() == 'bins') & (not bins): bins = optVal
if (optName.lower() == 'runfit') & (runFit is None): runFit = optVal
if zmin == None: zmin = 0.1
if zmax == None: zmax = 1.2
if MJDMin == None: MJDMin = 0.0
if MJDMax == None: MJDMax = np.inf
if bins == None: bins = "equalSize"
if runFit == None: runFit = True
except:
print "Option File not working/Nonexistent. Using default values"
'''
self.globalChi2Storage.append(c2)\n self.globalNDataStorage.append(nd)\n\n if self.Rate_Model == 'discrete':\n fJList = [MinObj.values['f_1'],MinObj.values['f_2'],MinObj.values['f_3'],MinObj.values['f_4'],MinObj.values['f_5'],MinObj.values['f_6'],MinObj.values['f_7'],MinObj.values['f_8'],MinObj.values['f_9'],MinObj.values['f_10'],MinObj.values['f_11']]\n fJErrList = [MinObj.errors['f_1'],MinObj.errors['f_2'],MinObj.errors['f_3'],MinObj.errors['f_4'],MinObj.errors['f_5'],MinObj.errors['f_6'],MinObj.errors['f_7'],MinObj.errors['f_8'],MinObj.errors['f_9'],MinObj.errors['f_10'],MinObj.errors['f_11']]\n\n \n self.fJList = fJList\n self.fJErrList = fJErrList\n self.Beta = None\n self.k = None\n self.kErr = None\n self.BetaErr = None\n print fJList\n print fJErrList\n else:\n k = MinObj.values['k']\n #kErr = MinObj.errors['k']\n kErr = (np.abs(ErrDict['k']['lower']) + np.abs(ErrDict['k']['upper']))/2.0\n Beta = MinObj.values['Beta']\n #BetaErr = MinObj.errors['Beta']\n BetaErr = (np.abs(ErrDict['Beta']['lower']) + np.abs(ErrDict['Beta']['upper']))/2.0\n if self.Rate_Model == 'brokenpowerlawVar':\n zBreak = MinObj.values['zBreak']\n zBreakErr = MinObj.values['zBreakErr']\n self.k = k\n self.Beta = Beta\n self.kErr = kErr\n self.BetaErr = BetaErr\n #/(self.nbins - 2)\n self.BetaRatio = (1+zCentersFit)**(Beta)\n self.fJList = None\n\n print 'SCALE DEBUG'\n print NCC\n print NIa\n print self.BetaRatio\n print 'SCALE DEBUG2'\n print np.sum(NCC)\n print np.sum(NIa)\n print np.sum(NIa*self.BetaRatio)\n self.fracCCData = (NCC*1.0)/(1.0*(1.0*NCC + NIa*self.BetaRatio))\n self.fracCCDataTot = (np.sum(NCC)*1.0)/(1.0*(1.0*np.sum(NCC) + np.sum(NIa*self.BetaRatio)))\n print 'SCALE DEBUG3'\n print self.fracCCData\n print self.fracCCDataTot\n print 'SCALE DEBUG4'\n print OrigNCC\n print np.sum(OrigNCC)\n print CCScale\n\n #print self.fracCCDataTot\n #print type(self.fracCCDataTot)\n #assert(type(self.fracCCDataTot) == float)\n print \"Chi2 final = {0}\".format(round(lamChi2Dump(self.k, self.Beta)[0], 4))\n self.chi2 = fmin.fval\n print \"Chi2final? = {0}\".format(round(fmin.fval, 4))\n\n\n\n if not(self.priorRate is None):\n ratePriorFinalVal = ratePrior(self.k*self.MCK, self.Beta+self.MCBeta, self.priorRate, self.priorZEff, self.ratePriorErrUp, self.ratePriorErrDown, self.ratePriorErrAll )\n c2NoPrior = chi2func(nData, nSim, self.effmat, fnorm, zCentersSamp, zCentersFit, self.k, self.Beta, dump = False, nIA = NIa, nCC = NCC, simInd =simInd, TrueNCCData = TrueNCC)\n print \"RATE PRIOR FINAL\"\n print ratePriorFinalVal\n print \"Chi2final? 
= {0}\".format(round(fmin.fval, 4))\n print \"Chi2FinalNoPrior\"\n print c2NoPrior\n\n #fJs = np.ones(zCenters.shape)\n '''\n try:\n if (Rate_Model != 'discrete'):\n plt.clf()\n MinObj.draw_contour('k','Beta', nsigma=3)\n plt.savefig('{0}_{1}_k_beta_contour.png'.format(self.realName, self.simName))\n if Blind:\n locs, labels = plt.xticks()\n labels = locs + np.cos(cosVal)\n plt.xticks(labels)\n locs, labels = plt.yticks()\n labels = locs + np.cos(cosVal)\n plt.yticks(labels)\n plt.clf()\n \n #xgrid,ygrid, sigma, rawdata = MinObj.mncontour_grid('k', 'Beta', numpoints=30, sigma_res = 1, nsigma = 2.0)\n #fig, ax = plt.subplots(1)\n #plt.clf()\n #CS = ax.contour(xgrid, ygrid + self.MCBeta, sigma, levels = [ 1.0, 2.0])\n #ax.clabel(CS, fontsize=7, inline=1)\n #ax.set_xlabel('k')\n #ax.set_ylabel('Beta')\n #if Blind:\n # ax.set_xticklabels([])\n # ax.set_yticklabels([])\n #plt.savefig('{0}_{1}_k_beta_contour.png'.format(self.realName, self.simName))\n #plt.close()\n except: \n print \"Plot Fail A\"\n\n try:\n if (Rate_Model != 'discrete'):\n plt.clf()\n MinObj.draw_profile('Beta', text = False)\n if Blind:\n\n locs, labels = plt.xticks()\n labels = locs + np.cos(cosVal)\n plt.xticks(labels)\n plt.savefig('{0}_{1}_beta_contour.png'.format(self.realName, self.simName))\n plt.clf()\n except:\n print \"Plot Fail C\"\n try:\n if Rate_Model != 'discrete':\n Betas = np.linspace(self.Beta - 0.5, self.Beta + 0.5, 51)\n FCNs = []\n for bTemp in Betas:\n FCN = lamChi2( self.k, bTemp)\n FCNs.append(FCN)\n\n plt.plot(Betas, FCNs, c = 'k', label = 'Non Minuit Contour')\n plt.legend()\n plt.xlabel('Beta')\n plt.ylabel('Chi2')\n if Blind:\n\n locs, labels = plt.xticks()\n labels = locs + np.cos(cosVal)\n plt.xticks(labels)\n plt.savefig('{0}_{1}_beta_mycontour.png'.format(self.realName, self.simName))\n plt.clf()\n\n\n except:\n print \"Plot Fail D\"\n\n if Rate_Model != 'discrete':\n plt.clf()\n ax = plt.axes()\n Betas = np.linspace(self.Beta - 0.1, self.Beta + 0.1, 501)\n FCNs = []\n for bTemp in Betas:\n FCN = lamChi2( self.k, bTemp)\n FCNs.append(FCN)\n\n plt.plot(Betas, FCNs, c = 'k', label = 'Non Minuit Contour')\n plt.legend()\n plt.xlabel('Beta')\n plt.ylabel('Chi2')\n if Blind:\n\n locs, labels = plt.xticks()\n labels = locs + np.cos(cosVal)\n ax.set_xticklabels(labels)\n print \"FCNs\"\n print FCNs\n plt.savefig('{0}_{1}_beta_myzoomcontour.png'.format(self.realName, self.simName))\n plt.clf()\n\n\n plt.clf()\n ax = plt.axes()\n ks = np.linspace(self.k - 0.1, self.k + 0.1, 501)\n FCNs = []\n for kTemp in ks:\n FCN = lamChi2( kTemp,self.Beta)\n FCNs.append(FCN)\n\n plt.plot(ks, FCNs, c = 'k', label = 'Non Minuit Contour')\n plt.legend()\n plt.xlabel('k')\n plt.ylabel('Chi2')\n \n print \"FCNs\"\n print FCNs\n plt.savefig('{0}_{1}_k_myzoomcontour.png'.format(self.realName, self.simName))\n plt.clf()\n\n\n\n df = np.array(FCNs[1:]) - np.array(FCNs[:-1])\n inds = np.where(df > 0)[0]\n print 'inds'\n print inds\n print inds < 250\n print np.where(inds < 250)\n inds = inds[np.where(inds < 250)]\n print 'inds'\n print inds\n print \"INDSSHAPE\"\n print inds.shape\n if inds.shape[0]:\n print \"MINUIT IS PROBABLY MAD. 
HERES WHY\"\n print inds\n print Betas[inds]\n if inds.shape[0] > 1:\n inds = inds[-1]\n print inds\n print Betas[inds]\n\n lamChi2Dump(self.k, Betas[inds -3])\n print \"MINUIT MAD 2\"\n lamChi2Dump(self.k, Betas[inds -2])\n print \"MINUIT MAD 3\"\n lamChi2Dump(self.k, Betas[inds -1])\n\n print \"MINUIT MAD 4\"\n lamChi2Dump(self.k, Betas[inds])\n print \"MINUIT MAD 5\"\n lamChi2Dump(self.k, Betas[inds + 1])\n print \"MINUIT MAD 6\"\n lamChi2Dump(self.k, Betas[inds + 2])\n print \"MINUIT MAD 7\"\n lamChi2Dump(self.k, Betas[inds + 3])\n print \"END MINUIT MAD\"\n \n\n\n\n try:\n if (Rate_Model != 'discrete'):\n plt.clf()\n MinObj.draw_mncontour('k','Beta', nsigma=3)\n plt.savefig('{0}_{1}_k_beta_mncontour.png'.format(self.realName, self.simName))\n if Blind:\n locs, labels = plt.xticks()\n labels = locs + np.cos(cosVal)\n plt.xticks(labels)\n locs, labels = plt.yticks()\n labels = locs + np.cos(cosVal)\n plt.yticks(labels)\n plt.clf()\n MinObj.draw_mnprofile('Beta', text = False, subtract_min = True)\n if Blind:\n \n\n locs, labels = plt.xticks()\n labels = locs + np.cos(cosVal)\n plt.xticks(labels)\n plt.savefig('{0}_{1}_beta_mncontour.png'.format(self.realName, self.simName))\n plt.clf()\n #xgrid,ygrid, sigma, rawdata = MinObj.mncontour_grid('k', 'Beta', numpoints=30, sigma_res = 1, nsigma = 2.0)\n #fig, ax = plt.subplots(1)\n #plt.clf()\n #CS = ax.contour(xgrid, ygrid + self.MCBeta, sigma, levels = [ 1.0, 2.0])\n #ax.clabel(CS, fontsize=7, inline=1)\n #ax.set_xlabel('k')\n #ax.set_ylabel('Beta')\n #if Blind:\n # ax.set_xticklabels([])\n # ax.set_yticklabels([])\n #plt.savefig('{0}_{1}_k_beta_contour.png'.format(self.realName, self.simName))\n #plt.close()\n except: \n print \"Plot Fail B\"\n pass\n \n \n\n \n #plt.axhline(y = self.MCBeta, c = 'k', label = 'True Beta')\n #plt.axhline(y = Beta + self.MCBeta, c = 'g', label= 'Best Fit Beta')\n #plt.axvline(x = k, label = 'Best Fit k')\n ''' \n '''\n def chi2V2(self, fJs, fJErrs, zCenters, k, Beta):\n fitfJs = k*(1+zCenters)**Beta\n Chi2Temp = 0\n for fJ, fitfJ, fJErr in zip(fJs, fitfJs, fJErrs):\n Chi2Temp += (fJ - fitfJ)**2/(fJ + fJErr)\n return Chi2Temp\n '''\n\ndef weakPrior(value, priorTuple):\n if value < priorTuple[1]:\n if value > priorTuple[0]:\n return 1\n else: \n return (value - priorTuple[0])**4\n else:\n return (value - priorTuple[1])**4\n\ndef ratePrior(fitK, fitBeta, priorRate, zEffPrior, priorRateErrUp = None, priorRateErrDown = None, priorRateErrAll = None):\n\n print \"PRIOR\"\n print priorRate\n print zEffPrior\n print priorRateErrUp\n print priorRateErrDown\n print \"Fit Beta/k\"\n print fitBeta\n print fitK\n fitRate = fitK*(1+zEffPrior)**fitBeta\n print 'Fit Rate'\n print fitRate\n print \"PriorChi2\"\n\n if fitRate > priorRate:\n\n if not (priorRateErrUp is None):\n print (fitRate - priorRate)**2/priorRateErrUp**2\n return (fitRate - priorRate)**2/priorRateErrUp**2\n else:\n print (fitRate - priorRate)**2/priorRateErrAll**2\n return (fitRate - priorRate)**2/priorRateErrAll**2\n else:\n if not (priorRateErrDown is None):\n print (fitRate - priorRate)**2/priorRateErrDown**2\n return (fitRate - priorRate)**2/priorRateErrDown**2\n else:\n print (fitRate - priorRate)**2/priorRateErrAll**2\n return (fitRate - priorRate)**2/priorRateErrAll**2\n\n\n\n\n\n\n\ndef getCCScale(simCat, dataCat, MURESWindow = (-1, 1), zbins = [0.0, 0.3, 0.6, 0.9, 1.2], Beta = None, binList = None, fracCCData = None, outfilePrefix = 'Test', Rate_Model = 'powerlaw', f_Js = None, returnHist = False, debug = False, simInd = 100, ztype = 'zPHOT'):\n 
#import iminuit as iM\n #from iminuit import Minuit as M\n if debug:\n print \"Check this\"\n print Rate_Model\n print f_Js\n print Beta\n print fracCCData\n print \"Done Checking\"\n CCScales = []\n CCScaleErrs = []\n simIaHists = []\n simCCHists = []\n dataHists = []\n if not(f_Js is None):\n f_Js = np.array(f_Js)\n\n allSimCC = simCat[simCat['SIM_TYPE_INDEX'].astype(int) != 1]\n allSimIa = simCat[simCat['SIM_TYPE_INDEX'].astype(int) == 1]\n allData = np.copy(dataCat)\n\n\n #fnorm2 = float(dataCat.shape[0])/float(np.sum(simHist))\n \n simCat = simCat[(simCat['MURES'] < MURESWindow[0]) | (simCat['MURES'] > MURESWindow[1]) ]\n dataCat = dataCat[(dataCat['MURES'] < MURESWindow[0]) | (dataCat['MURES'] > MURESWindow[1]) ]\n \n\n for zl, zh in zip(zbins[:-1], zbins[1:]):\n\n tempSim = simCat[(simCat[ztype] < zh) & (simCat[ztype] > zl)]\n tempData = dataCat[(dataCat[ztype] < zh) & (dataCat[ztype] > zl)]\n\n\n allSimCCZbin = allSimCC[(allSimCC[ztype] < zh) & (allSimCC[ztype] > zl)]\n allSimIaZbin = allSimIa[(allSimIa[ztype] < zh) & (allSimIa[ztype] > zl)]\n if debug:\n print \"all Sim CC Zbin/IaZbin\"\n print allSimCCZbin.shape[0]\n print allSimIaZbin.shape[0]\n\n allDataZbin = allData[(allData[ztype] < zh) & (allData[ztype] > zl)]\n\n\n\n tempSimCC = tempSim[tempSim['SIM_TYPE_INDEX'] != 1]\n tempSimIa = tempSim[tempSim['SIM_TYPE_INDEX'] == 1]\n\n R = float(tempData.shape[0])/float(allDataZbin.shape[0])\n if debug:\n print \"R\"\n\n print R\n\n print \"Hist CC, outlier and total\"\n print tempSim.shape[0]\n print allSimCCZbin.shape[0]\n\n\n print \"pre Beta Correction allSimIa\"\n print tempData.shape[0]\n print allSimIaZbin.shape[0]\n\n if Rate_Model == 'discrete':\n hist, bins = np.histogram(allSimIaZbin[ztype], bins = 11)\n if debug:\n print 'fJ shape'\n print f_Js.shape\n print f_Js\n print hist\n print bins\n betaCorrAllSimIaZbin =np.sum(hist*f_Js)\n else:\n betaCorrAllSimIaZbin =np.sum((1+ allSimIaZbin[ztype])**Beta)\n #S = float(np.array(R*histSAllIa) - np.array(tempSimIa.shape[0]))/float(np.array(tempSimCC.shape[0]) - np.array(R*histSAllCC))\n\n try:\n if debug:\n print \"Test S\"\n print R\n print betaCorrAllSimIaZbin\n print tempSimIa.shape[0]\n print tempSimCC.shape[0]\n print allSimCCZbin.shape\n print 'EEE'\n print np.array(R*betaCorrAllSimIaZbin)\n print 'DDD'\n print np.array(tempSimIa.shape[0])\n print 'CCC'\n print (np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))\n print \"AAA\"\n print (np.array(R*betaCorrAllSimIaZbin) - np.array(tempSimIa.shape[0]))/(np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))\n print \"BBB\"\n #S = (np.array(R*betaCorrAllSimIaZbin) - np.array(tempSimIa.shape[0]))/(np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))\n S = float(np.array(R*betaCorrAllSimIaZbin) - np.array(tempSimIa.shape[0]))/float(np.array(tempSimCC.shape[0]) - np.array(R*allSimCCZbin.shape[0]))\n except: \n S = np.nan\n if debug:\n print \"S WTF\"\n print S\n\n\n print \"Uncertainty Related Bullshit\"\n '''\n print \"Delta R\"\n\n dR = np.sqrt(histD + histDAll)\n\n print dR\n\n num1 = np.sqrt(np.sqrt((dR/R)**2 + histSAllIa) + tempSimIa.shape[0])\n\n num2 = np.sqrt(np.sqrt((dR/R)**2 + histSAllCC) + tempSimCC.shape[0])\n\n den1 = (R*histSAllIa - tempSimIa.shape[0])\n\n den2 = (tempSimCC.shape[0] - R*histSAllCC)\n\n\n dS = np.sqrt((num1/den1)**2 + (num2/den2)**2)\n '''\n #ddnCC = np.sqrt(tempSimCC.shape[0])*(tempSimIa.shape[0] - histSAllIa*R)/(tempSimCC.shape[0] - R*histSAllCC)**2\n\n #ddNCC = np.sqrt(histSAllCC)*R*(histSAllIa*R - 
tempSimIa.shape[0])/(tempSimCC.shape[0] - R*histSAllCC)**2\n\n #ddnIa = np.sqrt(tempSimIa.shape[0])/(tempSimCC.shape[0] - R*histSAllCC)\n #ddNIa = np.sqrt(histSAllIa)*R/(tempSimCC.shape[0] - R*histSAllCC)\n\n ddnCC = np.sqrt(tempSimCC.shape[0])*(tempSimIa.shape[0] - allSimIaZbin.shape[0]*R)/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])**2\n\n ddNCC = np.sqrt(allSimCCZbin.shape[0])*R*(allSimIaZbin.shape[0]*R - tempSimIa.shape[0])/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])**2\n\n ddnIa = np.sqrt(tempSimIa.shape[0])/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])\n ddNIa = np.sqrt(allSimIaZbin.shape[0])*R/(tempSimCC.shape[0] - R*allSimCCZbin.shape[0])\n\n #ddR = (histSAllIa*tempSimCC.shape[0] - histSAllCC * tempSimIa.shape[0])/(tempSimCC.shape[0] - R*histSAllCC)**2\n\n dS = np.sqrt(ddnCC**2 + ddNCC**2 + ddnIa**2 + ddNIa**2)# + ddR**2)\n\n if debug:\n\n print \"ddnCC\"\n\n print ddnCC\n\n print \"ddNCC\"\n\n print ddNCC\n\n print \"ddnIa\"\n\n print ddnIa\n\n print \"ddNIa\"\n\n print ddNIa\n\n #print \"ddR\"\n\n #print ddR\n\n print \"Delta S\"\n\n print dS\n\n #assert(S > 0)\n if S < 0: \n S = np.nan\n if np.isnan(S):\n print 'SCALE IS NAN'\n if len(CCScales) > 0:\n #CCScales.append(CCScales[-1])\n CCScales.append(1.0)\n else: \n CCScales.append(1.0)\n else:\n CCScales.append(S)\n if type(dS) == np.ndarray:\n if np.isnan(dS[0]):\n CCScaleErrs.append(1.0)\n else:\n CCScaleErrs.append(dS[0])\n else:\n if np.isnan(dS):\n CCScaleErrs.append(1.0)\n else:\n CCScaleErrs.append(dS)\n\n #if debug:\n # print \"CC PlotDebug\"\n # print (simBinsCC[1:] + simBinsCC[:-1])/2.0\n # print simHistCC\n # print CCScales[0]\n # print dS\n # print fnorm2\n # print histD\n # print (muresBins[1:] + muresBins[:-1])/2.0\n \n #if simInd ==1:\n # plt.step((simBinsCC[1:] + simBinsCC[:-1])/2.0, simHistCC*fnorm2, c = 'b', where = 'mid', label = 'prescaled Sim CC')\n # plt.step((simBinsCC[1:] + simBinsCC[:-1])/2.0, CCScales[0]*simHistCC*fnorm2, c = 'g', where = 'post', label = 'postscaledSimCC')\n # plt.step((muresBins[1:] + muresBins[:-1])/2.0, histD, c = 'r', where = 'mid', label = 'data')\n # plt.legend()\n # plt.savefig(outfilePrefix + 'ScaledHist.png')\n # plt.clf()\n if debug:\n print \"CCScaleErrs\"\n print CCScaleErrs\n if returnHist:\n return CCScales, CCScaleErrs, simIaHists, simCCHists, dataHists\n return CCScales, CCScaleErrs\n\ndef applyCCScale(NCC, CCScales, CCScaleErrs, datazbins = None, zbins = None):\n if not(zbins is None):\n zbins = np.array(zbins)\n if not (datazbins is None):\n datazbins = np.array(datazbins)\n if type(CCScaleErrs) == list:\n CCScaleErrs = np.array(CCScaleErrs)\n if type(CCScales) == list:\n CCScales = np.array(CCScales)\n print 'CCScaleErrs'\n print CCScaleErrs\n print datazbins\n print zbins\n\n\n \n if type(CCScales) == np.ndarray:\n if CCScales.shape[0] == 1:\n NCCScaled = CCScales[0]*NCC\n else:\n if (datazbins is None) | (zbins is None):\n assert(0)\n if CCScales.shape[0] < 4:\n k = CCScales.shape[0] -1\n else:\n k = 3\n \n nancond = np.isnan(CCScales)\n if np.sum(nancond) > 0:\n CCScales[nancond] = 1.\n CCScaleErrs[nancond] = 1.\n\n zCenters = (zbins[1:]+ zbins[:-1])/2.0\n print zCenters\n print CCScales\n \n #spline = UnivariateSpline(zbins, CCScales, w = 1.0/CCScaleErrs, k = k)\n spline = UnivariateSpline(zCenters, CCScales, w = 1.0/CCScaleErrs, k = k)\n\n print datazbins.shape\n print datazbins\n print NCC.shape\n\n datazcents = (datazbins[1:]+ datazbins[:-1])/2.0\n\n NCCScaled = spline(datazcents)*NCC\n\n elif (type(CCScales) == int) | (type(CCScales) == float):\n 
NCCScaled = CCScales*NCC\n else:\n assert(0)\n\n NCCScaled = NCCScaled.clip(0)\n print NCCScaled\n\n assert(not bool(np.sum(NCCScaled < 0)))\n\n\n return NCCScaled\n\nif __name__ == '__main__':\n from sys import argv\n print \"argv\"\n print argv\n datadir = argv[1]\n simdir = argv[2]\n dataname = argv[3]\n print \"dataname\"\n simname = argv[4]\n print simname\n simgenfile = argv[5]\n print simgenfile\n NNCut = False\n cheatType = bool(int(argv[6]))\n cheatZ = bool(int(argv[7]))\n trueBeta = float(argv[8])\n paramFile = argv[9]\n cutFiles = [argv[10]]\n try:\n debug = bool(int(argv[11]))\n except:\n debug = False\n\n \n #if( ('Combine' in simdir) or ('SALT2' in simdir)) & (('Combine' in datadir) or ('SALT2' in simdir)):\n #NNCut = True\n #NNProbCut = 0.95\n \n #if len(argv) > 6:\n # NNCut = True\n # NNProbCut = 0.9\n # NNData = argv[6]\n # NNSim = argv[7]\n\n \n #default params\n\n zminFit = 0.1\n zmaxFit = 1.2\n zminSamp = 0.1\n zmaxSamp = 1.2\n MJDMin = 0.0\n MJDMax = np.inf\n bins = \"equalSize\" \n runFit = True\n fracContamCuts = [-1]\n fixBeta = True\n fixK = False\n nbins = None\n binList = None\n ScaleMuResCutLow = -1\n ScaleMuResCutHigh = 1\n #muresBins = 1\n muresBinsLow = 3\n muresBinsHigh = 3\n scaleZBins = [0.0, 1.2]\n nScaleZBins = None\n cheatCCSub = False\n cheatCCScale = False\n ZSysFlag = False\n Blind = False\n Rate_Model = 'powerlaw'\n MURESCuts = 2.0 #[(0.0, 0.8, -0.5, 0.5), (0.8, 1.5, -1, 1)]\n noCCMC = False\n fixCCScale = False\n trueMCBeta = 1.65\n trueMCK = 1.97E-5\n\n priorRate = None\n priorZEff = None\n ratePriorErrUp = None\n ratePriorErrDown =None\n ratePriorErrAll = None\n priors = None\n\n #override file\n\n params = open(paramFile, 'r').readlines()\n\n for p in params:\n\n print p\n exec(p)\n\n if nScaleZBins is None :\n redoScaleZBinFlag = False\n\n else:\n redoScaleZBinFlag = True\n\n if not(priors is None):\n if len(priors) == 3:\n priorRate, priorZEff, ratePriorErrAll = priors\n ratePriorErrUp = None\n ratePriorErrDown = None\n elif len(priors) == 4:\n priorRate, priorZEff, ratePriorErrUp, ratePriorErrDown = priors\n ratePriorErrAll =None\n\n\n\n\n cosVal = 47392945716038.134971247\n kmean = []\n ksigma = []\n kErr = []\n BetaMean = []\n #BetaWeightMean = []\n #KWeightMean = []\n BetaSigma= []\n BetaErr = []\n zBreakMeans = []\n zBreakSigmas =[]\n zBreakErrs = []\n Chi2Mean = []\n Chi2Sigma = []\n f_JStorage = []\n f_JErrStorage = []\n SampleSizes = []\n\n CCScaleStorageGlobal = []\n CCScaleErrStorageGlobal = []\n\n\n #MURES_Cuts = [2.0]\n #MURES_Cuts = [1.0, 1.5, 2.0, 3.0, 4.0, 99.0, 2.0]\n #for MURES_Cut in MURES_Cuts:\n fcc = -1\n for cf in cutFiles:\n cuts = [] # cuts = [('FITPROB', 0.01, np.inf), ('NN_PROB_IA', NNProbCut, np.inf)]\n\n cutlist = open(cf, 'r').readlines()\n for l in cutlist:\n spl = l.split()\n cuts.append(('{0}'.format(spl[0]), float('{0}'.format(spl[1])), float('{0}'.format(spl[2]))))\n\n ks = []\n kErrs = []\n Betas = []\n BetaErrs = []\n zBreaks =[]\n zBreakErrs = []\n Chi2s = []\n\n CCScaleStorage = []\n CCScaleErrStorage = []\n\n\n nFail = 0\n simLoaded = False\n #print \"FUCK MPI\"\n #if Rate_Model == 'discrete':\n # subprocess.call(['python', 'constructChi2Func.py', str(nbins)], shell = False)\n #print \"MPI Fucked\"\n if '{' in datadir:\n if os.path.exists(datadir.format(98)):\n print \"MOAR SIMS\"\n nfile = 101\n else:\n print \"FEWAR SIMS\"\n nfile = 49\n else:\n nfile = 2\n for simInd in range(1,nfile):\n \n\n #print \"Sim {0}\".format(simInd)\n #SimBeta = 2.1 # simdir.split('_')[-3]\n #SimR0 = 1.7*10**-5 
#simdir.split('_')[-5]\n #print \"Sim R0 = {1}; Sim Beta = {0}\".format(SimBeta, SimR0)\n\n \n print datadir.format(simInd)\n if simLoaded:\n try:\n \n RateTest.newData(datadir.format(simInd), dataname.format(simInd), simInd =simInd)\n if ZSysFlag:\n assert(0)\n RateTest.zSystematic(nbins = nbins, binList = binList)\n\n\n if redoScaleZBinFlag:\n\n RealCat = RateTest.postCutRealCat \n RealOutlierCat = RealCat[(RealCat['MURES'] > muresBinsHigh)| (RealCat['MURES'] < muresBinsLow)]\n\n zArray =RealOutlierCat[RateTest.ztype]\n zArray.sort()\n\n splitZs = np.array_split(zArray, nScaleZBins)\n\n #[(0[0], (0[-1] + 1[0]), (1[-1] + 2[0]), 2[1]]\n\n scaleZBins = [splitZs[0][0]]\n\n \n for i in range(1,nScaleZBins):\n\n scaleZBins.append((splitZs[i-1][-1] + splitZs[i][0] )/2.0)\n scaleZBins.append(splitZs[i][-1])\n\n\n #RateTest.effCalc(nbins = nbins, fracContamCut = fcc, simInd =simInd)\n #RateTest.effCalc(nbins = 20)\n BetaIter = []\n BetaErrIter = []\n CCIter = []\n CCErrIter = []\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, simInd =simInd, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, TrueCCScale = TrueCCScale, scaleZBins = scaleZBins, Blind = Blind)\n if Rate_Model != 'discrete':\n if Blind:\n print \"Blinding A\"\n BetaIter.append(RateTest.Beta+ np.cos(cosVal))\n else:\n BetaIter.append(RateTest.Beta)\n BetaErrIter.append(RateTest.BetaErr)\n\n for iteration in range(nIter):\n if not fixCCScale:\n if not noCCMC:\n CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname,Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)\n CCIter.append(CCScale)\n CCErrIter.append(CCScaleErr)\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = CCScale, CCScaleErr = CCScaleErr, TrueCCScale = TrueCCScale, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, f_Js =RateTest.fJList, CCZbins = scaleZBins , scaleZBins = scaleZBins, Blind = Blind)\n else:\n CCIter.append(0.0)\n CCErrIter.append(0.0)\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 0.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, f_Js =RateTest.fJList, CCZbins = scaleZBins , scaleZBins = scaleZBins, Blind = Blind)\n else:\n CCIter.append(1.0)\n CCErrIter.append(0.0)\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, f_Js =RateTest.fJList, CCZbins = scaleZBins , scaleZBins = scaleZBins, Blind = Blind)\n\n if Blind:\n print \"Blinding b\"\n BetaIter.append(RateTest.Beta+ np.cos(cosVal))\n else:\n BetaIter.append(RateTest.Beta)\n BetaErrIter.append(RateTest.BetaErr)\n if not fixCCScale:\n if not noCCMC:\n CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname,Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)\n 
CCIter.append(CCScale)\n CCErrIter.append(CCScaleErr)\n else:\n CCIter.append(1.0)\n CCErrIter.append(0.0)\n \n print \"CCScale Progression\"\n print CCIter\n print \"CCScale Err Progression\"\n print CCErrIter\n if Rate_Model != 'discrete':\n print \"Beta Progression\"\n print BetaIter\n print \"Beta Err Progressions\"\n print BetaErrIter\n print \"Mean Betas\"\n print np.nanmean(BetaIter)\n\n print \"Mean CCScales\"\n print np.nanmean(CCIter)\n else:\n f_JStorage.append(RateTest.fJList)\n f_JErrStorage.append(RateTest.fJErrList)\n\n #print \"AAA CC Scales\"\n if not fixCCScale:\n\n if not noCCMC:\n CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)\n print CCScale\n CCScaleStorage.append(CCScale)\n CCScaleErrStorage.append(CCScaleErr)\n else:\n CCScaleStorage.append(0.0)\n CCScaleErrStorage.append(1.0)\n else:\n CCScaleStorage.append(1.0)\n CCScaleErrStorage.append(1.0)\n \n\n\n ks.append(RateTest.k)\n kErrs.append(RateTest.kErr)\n if Blind:\n print \"Blinding c\"\n Betas.append(RateTest.Beta+ np.cos(cosVal))\n\n else:\n Betas.append(RateTest.Beta)\n BetaErrs.append(RateTest.BetaErr)\n if Rate_Model == 'brokenpowerlawVar':\n zBreaks.append(Rate_Fitter.zBreak)\n zBreakErrs.append(Rate_Fitter.zBreakErr)\n\n Chi2s.append(RateTest.chi2)\n print \"CCScale Storage Iter {0}\".format(simInd)\n print CCScaleStorage\n if not noCCMC:\n print CCScale\n print CCScale[0]\n\n \n dnamestr = datadir.format(simInd)\n\n cutdnamestr = dnamestr.split('.')[0] + '+CUTS.FITRES.gz'\n #if saveCuts:\n # np.savetxt(cutdnamestr, RateTest.realcat.Catalog, delimiter = ' ', fmt='%s')\n\n lowzCut = zminFit\n highzCut = zmaxFit\n SampleSizes.append( RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)].shape[0])\n if saveCuts:\n np.savetxt(cutdnamestr, RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)], delimiter = ' ', fmt='%s')\n #with open(cutdnamestr, 'rb') as f_in:\n # with gzip.open(cutdnamestr + '.gz', 'wb') as f_out:\n # shutil.copyfileobj(f_in, f_out)\n except Exception, e:\n print \"FAILURE\"\n print e\n traceback.print_exc()\n nFail +=1\n else:\n try:\n\n RateTest = Rate_Fitter(datadir.format(simInd), dataname.format(simInd), simdir, simname,simgenfile, trueMCBeta, trueMCK, zminSamp =zminSamp, zmaxSamp =zmaxSamp, zminFit =zminFit, zmaxFit =zmaxFit, cheatZ = cheatZ, cheatType = cheatType, cuts = cuts, cheatCCSub = cheatCCSub, cheatCCScale = cheatCCScale, Rate_Model = Rate_Model, MURESCuts = MURESCuts, noCCMC = noCCMC, priorRate = priorRate, priorZEff = priorZEff, ratePriorErrUp = ratePriorErrUp, ratePriorErrDown =ratePriorErrDown, ratePriorErrAll = ratePriorErrAll)# , MJDMin = 0, MJDMax = np.inf)\n \n if ZSysFlag:\n RateTest.zSystematic(nbins = nbins, binList = binList)\n simLoaded = True\n\n RateTest.effCalc(nbinsSamp = nbinsSamp,nbinsFit = nbinsFit, fracContamCut = fcc)\n #RateTest.effCalc(nbins = 20)\n BetaIter = []\n BetaErrIter = []\n CCIter = []\n CCErrIter = []\n\n if redoScaleZBinFlag:\n\n RealCat = RateTest.postCutRealCat \n RealOutlierCat = RealCat[(RealCat['MURES'] > muresBinsHigh)| (RealCat['MURES'] < muresBinsLow)]\n\n 
zArray =RealOutlierCat[RateTest.ztype]\n zArray.sort()\n\n print 'zArray'\n print zArray\n print 'nScaleZBins'\n print nScaleZBins\n\n splitZs = np.array_split(zArray, nScaleZBins)\n\n #[(0[0], (0[-1] + 1[0]), (1[-1] + 2[0]), 2[1]]\n\n scaleZBins = [splitZs[0][0]]\n\n \n for i in range(1,nScaleZBins):\n\n scaleZBins.append((splitZs[i-1][-1] + splitZs[i][0] )/2.0)\n scaleZBins.append(splitZs[i][-1])\n\n\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, simInd =simInd, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, TrueCCScale = TrueCCScale, scaleZBins = scaleZBins, Blind = Blind)\n if Rate_Model != 'discrete':\n if Blind:\n print \"Blinding d\"\n BetaIter.append(RateTest.Beta+ np.cos(cosVal))\n else:\n BetaIter.append(RateTest.Beta)\n BetaErrIter.append(RateTest.BetaErr)\n for iteration in range(nIter):\n print \"interation Number\"\n print iteration\n if not fixCCScale:\n if not noCCMC:\n CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)\n CCIter.append(CCScale)\n CCErrIter.append(CCScaleErr)\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = CCScale, CCScaleErr = CCScaleErr, TrueCCScale = TrueCCScale, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, CCZbins = scaleZBins, scaleZBins = scaleZBins, Blind = Blind)\n else:\n CCIter.append(0.0)\n CCErrIter.append(1.0)\n\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 0.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, CCZbins = scaleZBins, scaleZBins = scaleZBins, Blind = Blind)\n else:\n CCIter.append(1.0)\n CCErrIter.append(1.0)\n RateTest.fit_rate(fixK = fixK, fixBeta = fixBeta, trueBeta = trueBeta - trueMCBeta, CCScale = 1.0, CCScaleErr = 1.0, TrueCCScale = 0.0, BetaInit = RateTest.Beta, kInit = RateTest.k, BetaErr = RateTest.BetaErr, kErr = RateTest.kErr, CCZbins = scaleZBins, scaleZBins = scaleZBins, Blind = Blind)\n\n \n if Rate_Model != 'discrete':\n if Blind:\n print \"Blinding e\"\n BetaIter.append(RateTest.Beta+ np.cos(cosVal))\n else:\n BetaIter.append(RateTest.Beta)\n BetaErrIter.append(RateTest.BetaErr)\n if not fixCCScale:\n if not noCCMC:\n CCScale, CCScaleErr = getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, Rate_Model = Rate_Model, f_Js =RateTest.fJList, simInd = simInd, debug = debug, ztype = RateTest.ztype)\n CCIter.append(CCScale)\n CCErrIter.append(CCScaleErr)\n if Rate_Model != 'discrete':\n print \"Beta Progression\"\n print BetaIter\n print \"Beta Err Progressions\"\n print BetaErrIter\n \n print \"Mean Betas\"\n print np.nanmean(BetaIter)\n\n else:\n f_JStorage.append(RateTest.fJList)\n f_JErrStorage.append(RateTest.fJErrList)\n \n print \"CCScale Progression\"\n print CCIter\n print \"CCScale Err Progression\"\n print CCErrIter\n print \"Mean CCScales\"\n print np.nanmean(CCIter)\n if not fixCCScale:\n if not noCCMC:\n print \"AAA CC Scales\"\n \n CCScale, CCScaleErr = 
getCCScale(RateTest.postCutSimCat, RateTest.postCutRealCat, MURESWindow = (ScaleMuResCutLow, ScaleMuResCutHigh), zbins = scaleZBins, Beta = RateTest.Beta, binList = RateTest.binListFit, fracCCData = RateTest.fracCCData, outfilePrefix = dataname, f_Js =RateTest.fJList, Rate_Model = Rate_Model, simInd = simInd, debug = debug, ztype = RateTest.ztype)\n print 'CC Scale'\n print CCScale\n CCScaleStorage.append(CCScale)\n CCScaleErrStorage.append(CCScaleErr)\n else: \n CCScaleStorage.append(0.0)\n CCScaleErrStorage.append(1.0)\n else:\n CCScaleStorage.append(1.0)\n CCScaleErrStorage.append(1.0)\n\n dnamestr = datadir.format(simInd)\n\n cutdnamestr = dnamestr.split('.')[0] + '+CUTS.FITRES.gz'\n\n np.savetxt(cutdnamestr, RateTest.realcat.Catalog, delimiter = ' ', fmt='%s')\n\n #with open(cutdnamestr, 'rb') as f_in:\n # with gzip.open(cutdnamestr + '.gz', 'wb') as f_out:\n # shutil.copyfileobj(f_in, f_out)\n\n\n\n cutsnamestr = simname.split('.')[0] + '+CUTS.FITRES.gz'\n\n np.savetxt(cutsnamestr, RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)], delimiter = ' ', fmt = '%s')\n\n lowzCut = zminFit\n highzCut = zmaxFit\n SampleSizes.append( RateTest.realcat.Catalog[(RateTest.realcat.Catalog[RateTest.ztype] < zmaxFit) & (RateTest.realcat.Catalog[RateTest.ztype] > zminFit)].shape[0])\n\n #with open(cutsnamestr, 'rb') as f_in:\n # with gzip.open(cutsnamestr + '.gz', 'wb') as f_out:\n # shutil.copyfileobj(f_in, f_out)\n\n\n ks.append(RateTest.k)\n kErrs.append(RateTest.kErr)\n if Rate_Model != 'discrete':\n if Blind:\n print \"Blinding f\"\n Betas.append(RateTest.Beta+ np.cos(cosVal))\n else:\n Betas.append(RateTest.Beta)\n BetaErrs.append(RateTest.BetaErr)\n\n if Rate_Model == 'brokenpowerlawVar':\n zBreaks.append(Rate_Fitter.zBreak)\n zBreakErrs.append(Rate_Fitter.zBreakErr)\n\n Chi2s.append(RateTest.chi2)\n print \"CCScale Storage Iter {0}\".format(simInd)\n print CCScaleStorage\n if not noCCMC:\n print CCScale\n print CCScale[0]\n if Rate_Model != 'discrete':\n if np.isnan(RateTest.Beta):\n nFail +=1\n\n except Exception, e:\n print \"FAILURE\"\n print e\n traceback.print_exc()\n nFail +=1\n #if Blind:\n # Betas = np.array(Betas) + np.cos(47392945716038.134971247)\n print \"Number of Failures\"\n print nFail\n if Rate_Model != 'discrete':\n\n badSims = np.invert(np.isfinite(Betas) & (BetaErrs > 0) & np.isfinite(ks) & (kErrs > 0))\n mBetas = ma.masked_array(Betas, mask=badSims)\n mBetaErrs = ma.masked_array(BetaErrs, mask=badSims)\n mks = ma.masked_array(ks, mask=badSims)\n mkErrs = ma.masked_array(kErrs, mask=badSims)\n print \"mean k\"\n print np.nanmean(ks)\n print \"mean kerrs\"\n print np.nanmean(kErrs)\n print \"std. k\"\n print np.nanstd(ks)\n print \"Mean beta\"\n print np.nanmean(Betas)\n print \"Mean betaerrs\"\n print np.nanmean(BetaErrs)\n print \"std. 
beta\"\n print np.nanstd(Betas)\n if len(Betas) == 1:\n kmean.append(ks[0])\n ksigma.append(0.0)\n kErr.append(kErrs[0])\n BetaMean.append(Betas[0])\n BetaSigma.append(0.0)\n BetaErr.append(BetaErrs[0])\n else:\n print \"test here\"\n print ks\n print mks\n print Betas\n print mBetas\n print 'end test here'\n kmean.append(np.average(mks, weights = 1.0/mkErrs**2))\n ksigma.append(np.std(mks))\n kErr.append(np.mean(mkErrs))\n BetaMean.append(np.average(mBetas, weights = 1.0/mBetaErrs**2))\n #BetaWeightMean.append(np.average(Betas, weights = 1.0/ma.masked_invalid(BetaErrs)**2))\n #KWeightMean.append(np.average(ks, weights = 1.0/ma.masked_invalid(kErrs)**2))\n BetaSigma.append(np.std(mBetas))\n BetaErr.append(np.mean(mBetaErrs))\n else:\n print \"mean f_Js\"\n print np.nanmean(f_JStorage, axis =0)\n print \"mean f_JErrs\"\n print np.nanmean(f_JErrStorage, axis =0)\n if Rate_Model == 'brokenpowerlawVar':\n zBreakMeans.append(np.nanmean(zBreaks))\n zBreakSigmas.append(np.nanstd(zBreaks))\n\n Chi2Mean.append(np.nanmean(Chi2s))\n Chi2Sigma.append(np.nanstd(Chi2s))\n\n \n\n \n #if simInd == 1:\n print \"Indiv Chi2s\"\n print Chi2s\n bins0 = np.linspace(1.0, 20.0, 10)\n hist, bins = np.histogram(Chi2s, bins = bins0)\n xs = (bins[1:] + bins[:-1])/2.0\n plt.bar(xs, hist, width = bins[1:] - bins[:-1])\n\n print \"Chi2 Hist\"\n print bins\n print hist\n\n chi2s = scipy.stats.chi2.pdf(xs, nbinsFit - 2)\n\n norm = np.max(hist)*1.0/np.max(chi2s)\n\n\n plt.plot(xs, chi2s*norm, color = 'g')\n if cheatType and not cheatZ:\n plt.savefig(dataname +'Chi2Plot_CheatType.png')\n elif cheatZ and not cheatType:\n plt.savefig(dataname +'Chi2Plot_CheatZ.png')\n elif cheatZ and cheatType:\n plt.savefig(dataname +'Chi2Plot_CheatTypeZ.png')\n else:\n plt.savefig(dataname +'Chi2Plot.png')\n\n if not noCCMC:\n print \"AAA CC Scale means (weighted, unweighted)\"\n #print np.average(ma.masked_invalid(np.array(CCScaleStorage)),weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2, axis = 0)\n #print np.nanmean(ma.masked_invalid(np.array(CCScaleStorage)), axis = 0)\n #print CCScaleStorage\n #print CCScaleErrStorage\n print np.average(np.array(CCScaleStorage),weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2, axis = 0)\n print np.nanmean(np.array(CCScaleStorage), axis = 0)\n print \"AAA CC Scale stds\"\n print np.nanstd(np.array(CCScaleStorage), axis = 0)\n CCScaleStorageGlobal.append(CCScaleStorage)\n\n \n\n print \"All Betas\"\n print Betas\n\n if cheatType:\n print \"THESE RESULTS ONLY INCLUDE TRUE Ias BECAUSE WE CHEATED AND USED THE SIM INFORMATION\"\n if cheatZ:\n print \"THESE RESULTS Use Simulated Redshift info\"\n '''\n print \"lengths of lists\"\n\n print len(RateTest.globalNDataStorage)\n print len(RateTest.globalChi2Storage)\n print len(RateTest.globalZPhotBinStorage)\n print len(RateTest.globalNDataIaPhotBinStorage)\n plt.clf()\n plt.scatter(RateTest.globalNDataStorage, RateTest.globalChi2Storage)\n plt.xlabel('nData')\n plt.ylabel('chi2 in bin')\n string = ''\n if cheatType: string += 'CheatType'\n if cheatZ: string += 'CheatZ'\n print 'string here'\n print string\n plt.savefig(RateTest.realName + 'Chi2VsnData' + string +'.png')\n plt.clf()\n\n\n plt.scatter(RateTest.globalZPhotBinStorage, RateTest.globalChi2Storage)\n plt.xlabel('zPhot bin center')\n plt.ylabel('chi2 in bin')\n plt.savefig(RateTest.realName + 'Chi2VsZPhot' + string +'.png')\n plt.clf()\n\n plt.clf()\n plt.scatter(RateTest.globalZPhotBinStorage, RateTest.globalNDataIaPhotBinStorage, s = 1, c = 'r', label = 'Type Ia Data, zPhot')\n 
plt.scatter(RateTest.globalZPhotBinStorage, RateTest.globalNDataCCPhotBinStorage, s = 1, c = 'b', label = 'CC Data, zPhot')\n plt.scatter(RateTest.globalZTrueBinStorage, RateTest.globalNDataIaTrueBinStorage, s = 1, c = 'Pink', label = 'Type Ia Data, zTrue')\n plt.scatter(RateTest.globalZTrueBinStorage, RateTest.globalNDataCCTrueBinStorage, s = 1, c = 'Cyan', label = 'CC Data, zTrue')\n plt.yscale('log')\n plt.xlabel('redshift either true or phot')\n plt.legend()\n plt.savefig(RateTest.realName + 'AggregateZDistro' + string +'.png')\n\n '''\n #print \"MURES CUTS\"\n #print MURES_Cuts\n print \"Frac Contam Cuts\"\n print fracContamCuts\n if Rate_Model != 'discrete':\n print \"Kmeans\"\n print kmean\n print \"Ksigmas\"\n print ksigma\n print \"BetaMeans\"\n print BetaMean\n print \"BetaSigmas\"\n print BetaSigma\n print \"BetaErrs\"\n print BetaErr\n else: \n print \"f_J mean unweighted\"\n print np.mean(f_JStorage, axis = 0)\n print \"f_J mean weighted\"\n print np.average(f_JStorage, weights = 1.0/(np.array(f_JErrStorage))**2, axis = 0)\n\n print \"f_J Errors\"\n print np.mean(f_JErrStorage, axis = 0)\n\n if Rate_Model == 'brokenpowerlawVar':\n print \"mean powerlaw break z\"\n print zBreakMeans\n print \"st. dev powerlaw break z\"\n print zBreakSigmas\n print \"Chi2Means\"\n print Chi2Mean\n print \"Chi2Sigma\"\n print Chi2Sigma\n\n assert(fracContamCuts[0] == -1)\n outfile = dataname\n if Rate_Model != 'discrete':\n print \"outfile Pre Prefix\"\n print outfile\n\n if cheatType:\n outfile = outfile + '_CheatType'\n if cheatZ:\n outfile = outfile + 'Z'\n elif cheatZ:\n outfile = outfile + '_CheatZ'\n\n outfile1 = outfile + '.txt'\n outfile2 = outfile + '-IndivBetaK.txt'\n output2 = open(outfile2, 'w')\n output2.write('i Beta_i k_i BetaErr_i kErr_i\\n')\n for i, b, k, berr, kerr in zip(range(len(Betas)),Betas, ks, BetaErrs, kErrs):\n output2.write('{0} {1:.4f} {2:.4f} {3:.4f} {4:.4f}\\n'.format(i, b, k, berr, kerr))\n output2.close()\n print \"Outfile Name\"\n if not(os.path.isfile(outfile1)):\n output = open(outfile1, 'w')\n output.write('#Date Date/time at which job finished\\n')\n output.write('#DataBeta Input beta for the simulated data sample. Will be 0.0 for real data.\\n')\n output.write('#N_sims Number of datalike sims that go into the subsequent means\\n')\n output.write('#SampleSize Mean Number of Events in data post cut\\n')\n output.write('#delta_Beta mean difference between large MC sim beta (2.11 for the time being) and the measured beta for the data (not the beta in column 2.\\n')\n output.write('#sigma_Beta stdev of delta_Beta over N_sims sims\\n')\n output.write('#BetaStdErr std. error in the mean of delta_Beta over N_sims sims\\n')\n output.write('#Beta_err mean statistical error on beta\\n')\n output.write('#K mean ratio between large MC sim K (1.7E-5 for the time being) and the measured K for the data \\n')\n output.write('#sigma_K stdev of K over N_sims sims\\n')\n output.write('#KStdErr std. error in the mean of K over N_sims sims\\n')\n output.write('#KStaterr mean statistical error on K\\n')\n output.write('#meanZ mean photoZ of the large MC sim\\n')\n output.write('#sigmaZ std. deviation of the photoZs for the large Sim\\n')\n output.write('#sigmaDZ std. deviation of (zSim - zPHOT)\\n')\n output.write('#NCC/NTotScaled overall CC Contamination after adjusting CC Frac to data\\n')\n output.write('#NCC/NTot overall CC Contamination in sim only\\n')\n output.write('#CCScales relative sim vs. 
CC rate in z-bins \\n')\n output.write('#TypeChoice Internal Diagnostic, check code comments\\n')\n output.write('#NNProbCut Threshold for NN probability of Ia\\n')\n output.write('#NBins Number of Analysis Bins\\n')\n output.write('#MRSLow Threshold for Neg Mures Outliers\\n')\n output.write('#MRSHigh Threshold for Pos Mures Outliers\\n')\n output.write('#FitprobCut Lowest Fitprob in sim\\n')\n output.write('#MRSCut NSigma Hubble residual cut\\n')\n output.write('#Chi2 minimum value of Chi2 function\\n')\n output.write('#Correlation cov[0,1]/np.sqrt(cov[0,0]*cov[1,1])\\n')\n output.write('#Date \\t\\tDataBeta N_sims SampleSize delta_Beta sigma_Beta BetaStdErr BetaStatErr K sigma_K KStdErr KStatErr meanZ sigmaZ sigmaDz NCC/NTotScaled NCC/NTot CCScales TypeChoice NNProbCut NBins MRSLow MRSHigh FitprobCut MRSCut Chi2 Correlation\\n')\n else:\n output = open(outfile1, 'a')\n print 'outfile'\n print outfile\n\n\n\n cat = RateTest.simcat.Catalog\n t = time.strftime('%b-%d-%H:%M')\n N_Sims = np.sum(np.invert(np.isnan(ks)))\n SigBeta = float(BetaSigma[0])\n SigK = float(ksigma[0])\n kStdErr = float(ksigma[0])/np.sqrt(N_Sims)\n BetaStdErr = float(BetaSigma[0])/np.sqrt(N_Sims)\n meanZ = np.nanmean(cat[RateTest.ztype])\n sigZ = np.nanstd(cat[RateTest.ztype])\n sigDZ = np.nanstd(cat[RateTest.ztype] - cat['SIM_ZCMB'])\n lowzCut = zminFit\n highzCut = zmaxFit\n contam2 = np.sum(cat[(cat[RateTest.ztype] > lowzCut) & (cat[RateTest.ztype] < highzCut)]['SIM_TYPE_INDEX'] !=1).astype(float)/ float(cat[(cat[RateTest.ztype] > lowzCut) & (cat[RateTest.ztype] < highzCut)].shape[0])\n contam = RateTest.fracCCDataTot\n ccscales = np.average(np.array(CCScaleStorage),weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2, axis = 0)\n cov = RateTest.covar\n correlation = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])\n print \"Outfile debug\"\n print t\n print trueBeta\n print N_Sims\n print BetaMean[0]\n print BetaStdErr\n print BetaErrs[0]\n print meanZ\n print sigZ\n print sigDZ\n print contam\n print RateTest.typeString\n print RateTest.postCutSimCat['NN_PROB_IA'].min()\n print SigBeta\n print kmean[0]\n print kErrs[0]\n print kStdErr\n print SigK\n print np.nanmean(SampleSizes)\n print int(nbinsFit)\n print ScaleMuResCutLow\n print ScaleMuResCutHigh\n print RateTest.postCutSimCat['FITPROB'].min()\n print MURESCuts\n print np.mean(Chi2Mean)\n print contam2\n print ccscales\n print correlation\n ccscales = ','.join(str(ccscales).split())\n output.write('{0}\\t\\t{1:.2f}\\t{2}\\t{17:.3f}\\t{3:.3f}\\t{12:.3f}\\t{4:.3f}\\t{5:.3f}\\t{13:.3f}\\t{14:.3f}\\t{15:.3f}\\t{16:.3f}\\t{6:.3f}\\t{7:.3f}\\t{8:.3f}\\t{9:.3f}\\t{24:.3f}\\t{25}\\t{10}\\t{11:.3f}\\t{18:d}\\t{19:.3f}\\t{20:.3f}\\t{21:.3f}\\t{22:.2f}\\t{23:.3f}\\t{26:.3f}\\n'.format(t, trueBeta, N_Sims, BetaMean[0], BetaStdErr, BetaErrs[0],meanZ, sigZ, sigDZ, contam, RateTest.typeString, RateTest.postCutSimCat['NN_PROB_IA'].min(), SigBeta, kmean[0], kErrs[0], kStdErr, SigK, np.nanmean(SampleSizes), int(nbinsFit), ScaleMuResCutLow, ScaleMuResCutHigh, RateTest.postCutSimCat['FITPROB'].min(), MURESCuts, np.mean(Chi2Mean), contam2, ccscales, correlation) )\n print \"BetaMean[0]\"\n print BetaMean[0]\n print BetaMean\n print \"KMean[0]\"\n print kmean[0]\n print kmean\n print \"Correlation\"\n\n print correlation\n #print \"BetaWeightMean[0]\"\n #print BetaWeightMean[0]\n #print BetaWeightMean\n #print \"KWeightMean[0]\"\n #print KWeightMean[0]\n #print KWeightMean\n if not noCCMC:\n print \"Individual Scales\"\n print CCScaleStorage\n print \"Individual ScaleErrs\"\n print 
CCScaleErrStorage\n print \"average ScaleErrs\"\n print np.nanmean(CCScaleErrStorage)\n print \"AAA CC Scale means (weighted, unweighted)2\"\n print np.average(ma.masked_invalid(np.array(CCScaleStorage)), weights = 1.0/ma.masked_invalid(CCScaleErrStorage)**2)\n print np.nanmean(ma.masked_invalid(np.array(CCScaleStorage)))\n\n print \"AAA CC Scale stds\"\n print np.nanstd(np.array(CCScaleStorage))\n if simInd == 1:\n plt.clf()\n hist, bins = np.histogram(CCScaleStorage, bins = np.linspace(0.0, 5.0, 10))\n plt.step((bins[1:]+bins[:-1])/2.0, hist, where = 'mid', c = 'g')\n plt.savefig(dataname + 'ScaleDistro.png')\n plt.clf()\n\n\n print \"nIter\"\n print nIter\n if not (priorRate is None):\n kPriorPlots = np.linspace(0.8, 1.5, 300)\n kPriors = []\n for ktemp in kPriorPlots:\n kPriors.append(ratePrior(ktemp*trueMCK, BetaMean[0]*trueMCBeta, priorRate, priorZEff, priorRateErrUp = ratePriorErrUp, priorRateErrDown = ratePriorErrDown, priorRateErrAll = ratePriorErrAll))\n\n\n betaPriorPlots = np.linspace(-0.5, 0.5, 300)\n betaPriors = []\n for btemp in betaPriorPlots:\n betaPriors.append(ratePrior(kmean[0]*trueMCK, b*trueMCBeta, priorRate, priorZEff, priorRateErrUp = ratePriorErrUp, priorRateErrDown = ratePriorErrDown, priorRateErrAll = ratePriorErrAll))\n\n actualPrior = ratePrior(kmean[0]*trueMCK, BetaMean[0]*trueMCBeta, priorRate, priorZEff, priorRateErrUp = ratePriorErrUp, priorRateErrDown = ratePriorErrDown, priorRateErrAll = ratePriorErrAll)\n\n\n kPriors = np.array(kPriors)\n betaPriors = np.array(betaPriors)\n\n plt.clf()\n plt.figure()\n \n plt.plot(kPriorPlots, np.log10(kPriors) )\n plt.hlines(np.log10(actualPrior), kPriorPlots[0], kPriorPlots[-1], label = 'Best Fit Prior = {0:.03f}'.format(actualPrior))\n plt.vlines(kmean[0], np.log10(kPriors).min(), np.log10(kPriors).max(), label = 'Best Fit K = {0:.03f}'.format(kmean[0]))\n plt.xlabel('k')\n plt.ylabel('ratePrior')\n plt.legend()\n plt.savefig(dataname + '_LogKPriorPlot.png')\n\n \n\n plt.clf()\n plt.figure()\n plt.plot(kPriorPlots, kPriors)\n plt.hlines(actualPrior, kPriorPlots[0], kPriorPlots[-1], label = 'Best Fit Prior = {0:.03f}'.format(actualPrior))\n plt.vlines(kmean[0], kPriors.min(), kPriors.max(), label = 'Best Fit K = {0:.03f}'.format(kmean[0]))\n plt.xlabel('k')\n plt.ylabel('ratePrior')\n plt.legend()\n plt.savefig(dataname + '_KPriorPlot.png')\n\n plt.clf()\n plt.figure()\n plt.plot(betaPriorPlots, betaPriors)\n plt.hlines(actualPrior, betaPriorPlots[0], betaPriorPlots[-1], label = 'Best Fit Prior = {0:.03f}'.format(actualPrior))\n plt.vlines(BetaMean[0], betaPriors.min(), betaPriors.max(), label = 'Best Fit Beta = {0:.03f}'.format(BetaMean[0]))\n plt.xlabel('beta')\n plt.ylabel('ratePrior')\n plt.legend()\n plt.savefig(dataname + '_BetaPriorPlot.png')\n\n '''\n argList = ''\n minObjList = ''\n chi2Initargs = ''\n for i in xrange(zCenters.shape[0]):\n argList += 'f{0},'.format(i)\n minObjList += 'f{0} = 1.0, error_f{0} = 0.1, limit_f{0} = (0.0, None),'.format(i)\n chi2Initargs += '1.0,'\n argList = argList[:-1]\n minObjList = minObjList[:-1]\n chi2Initargs = chi2Initargs[:-1]\n #print argList\n #print minObjList\n #print chi2Initargs\n\n exec('''\n '''\n def chi2func(nData, nSim, effmat, fnorm, zCenters, {0}, dump = False, complexdump = False):\n\n Chi2Temp = 0.0\n f_Js = [{0}]\n chi2Mat = np.zeros((self.nbins))\n adjNMC = np.zeros((self.nbins))\n #print f_Js\n #Check if I am scaling errors down with increasing MC size. 
Make MC twice as large as \"Data\" to test.\n for row, nDataI, i in zip(effmat, nData, xrange(self.nbins)):\n #if dump:\n # print \"nDataI\"\n # print nDataI\n JSumTemp = 0.0\n for eff, nSimJ, f_J, j in zip(row, nSim, f_Js, xrange(self.nbins)):\n JSumTemp += nSimJ*f_J*eff*fnorm\n if dump and i == j:\n print \"nDataI\"\n print nDataI\n print \"Bin Contribution to scaled nSim\"\n print nSimJ*f_J*eff*fnorm\n #print \"Product of nSimJ, f_J, eff, fnorm\"\n #print nSimJ\n #print f_J\n #print eff\n #print fnorm\n if nDataI > 1E-11 or JSumTemp > 1E-11:\n if dump and i == j:\n print \"nDataI\"\n print nDataI\n print \"scaled nSim\"\n print JSumTemp\n print \"fnorm\"\n print fnorm\n print \"error\"\n print nDataI + JSumTemp*fnorm\n if (nDataI + JSumTemp*fnorm) <= 0:\n print (nDataI + JSumTemp*fnorm)\n assert(0)\n Chi2Temp += ((nDataI - JSumTemp)**2/(nDataI + JSumTemp*fnorm))#*fnorm**2\n\n return Chi2Temp\n ''''''.format(argList), locals())\n fnorm = float(np.sum(nData))/float(self.simcat.Catalog['zPHOT'].shape[0])\n\n #print type(chi2func)\n #print 'lamChi2 = lambda {0}: chi2func(nData, nSim, self.effmat, fnorm, zCenters, {0})'.format(argList)\n exec('lamChi2 = lambda {0}: chi2func(nData, nSim, self.effmat, fnorm, zCenters, {0})'.format(argList),locals())\n exec('lamChi2Dump = lambda {0}: chi2func(nData, nSim, self.effmat, fnorm, zCenters, {0}, dump = True)'.format(argList),locals())\n #print type(lamChi2)\n #print type(lamChi2Dump)\n #print 'MinObj = M(lamChi2, {0})'.format(minObjList)\n exec('MinObj = M(lamChi2, {0})'.format(minObjList),locals())\n exec('chi2Init = lamChi2Dump({0})'.format(chi2Initargs),locals())\n #print \"Chi2 init = {0}\".format(round(chi2Init, 4))\n\n\n\n MinObj.set_strategy(2)\n MinObj.migrad()\n #MinObj.minos()\n zCenters = (simBins[1:] + simBins[:-1])/2.0\n print MinObj.values\n fJs = []\n fJErrs = []\n for v in MinObj.values.keys():\n fJs.append(MinObj.values[v])\n fJErrs.append(MinObj.errors[v])\n\n \n exec('lamChi22 = lambda k, Beta: self.chi2V2(fJs, fJErrs, zCenters, k, Beta)',locals())\n exec('MinObj2 = M(lamChi22, k = 1.0, error_k = 0.1, limit_k = (0.0, None), Beta = 0.0, error_Beta = 0.1)',locals())\n\n\n #print \"Large Perfect Sim {0}\".format(simInd)\n #print \"Sim R0 = 1.7E-5; Sim Beta = 4.2\"\n ##print \"Sim Beta = 1.5; Data Beta = 1.5\"\n ##RateTest = Rate_Fitter('DES_FULLSURVEY_TEST/JLDESFULLSURVEYIaOnly+zPHOT+smearC11/FITOPT000+SALT2mu.FITRES', 'JLDESFULLSURVEYIaOnly+zPHOT+smearC11','JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow/FITOPT000+SALT2mu.FITRES', 'JLDES_R0_7E-5_Beta_1-5_Shallow','/project/rkessler/SN/SNDATA_ROOT/SIM/JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow.DUMP')\n #print '/project/rkessler/jlasker/Rate_Analysis/TestSameK2Beta/outFit_datasize/JLDES_R0_1-7E-5_Beta_4-2_Datasize_Perfect-00{0:02d}/FITOPT000.FITRES'.format(simInd)\n\n #RateTest = Rate_Fitter('/project/rkessler/jlasker/Rate_Analysis/TestSameK2Beta/outFit_datasize/JLDES_R0_1-7E-5_Beta_4-2_Datasize_Perfect-00{0:02d}/FITOPT000.FITRES'.format(simInd), 'TestSameK2Beta/JLDES_R0_1-7E-5_Beta_4-2-00{0:02d}'.format(simInd),'/project/rkessler/jlasker/Rate_Analysis/outFit_datalike/JLDES_R0_1-7E-5_Beta_2-1_Datalike_PERFECT/FITOPT000.FITRES', 'JLDES_R0_1-7E-5_Beta_2-1_DataLikePhotZ','/scratch/midway2/rkessler/SNDATA_ROOT/SIM/JLDES_R0_1-7E-5_Beta_2-1_Datalike_PERFECT/JLDES_R0_1-7E-5_Beta_2-1_Datalike_PERFECT.DUMP', 2.1, zmin = 0.1, zmax = 1.3)# , MJDMin = 0, MJDMax = np.inf)\n\n\n #RateTest.effCalc(nbins = 12)\n ##RateTest.effCalc(nbins = 20)\n 
#RateTest.fit_rate()\n\n\n #ksPerf.append(RateTest.k)\n #kErrsPerf.append(RateTest.kErr)\n #BetasPerf.append(RateTest.Beta)\n #BetaErrsPerf.append(RateTest.BetaErr)\n #print \"Sim Beta = 1.5; Data Beta = 1.5\"\n #RateTest = Rate_Fitter('DES_FULLSURVEY_TEST/JLDESFULLSURVEYIaOnly+zPHOT+smearC11/FITOPT000+SALT2mu.FITRES', 'JLDESFULLSURVEYIaOnly+zPHOT+smearC11','JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow/FITOPT000+SALT2mu.FITRES', 'JLDES_R0_7E-5_Beta_1-5_Shallow','/project/rkessler/SN/SNDATA_ROOT/SIM/JLDES_R0_7E-5_Beta_1-5_Shallow/JLDES_R0_7E-5_Beta_1-5_Shallow.DUMP')\n\n\n try:\n optfname = argv[1]\n opts = open(optfname, 'r')\n optlist = opts.readlines()\n\n zmin = None; zmax = None; MJDMin = None; MJDMax = None; bins = None; runFit = None\n\n for opt in optlist:\n try: \n optName, optVal = opt.split()\n except:\n print \"{0} not formatted correctly\".format(opt)\n continue\n\n if (optName.lower() == 'zmin') & (not zmin): zmin = optVal\n if (optName.lower() == 'zmax') & (not zmax): zmax = optVal\n if (optName.lower() == 'mjdmin') & (not MJDMin): MJDMin = optVal\n if (optName.lower() == 'mjdmax') & (not MJDMax): MJDMax = optVal\n if (optName.lower() == 'bins') & (not bins): zmin = optVal\n if (optName.lower() == 'runfit') & (not runFit == None): zmin = optVal\n\n if zmin == None: zmin = 0.1\n if zmax == None: zmax = 1.2\n if MJDMin == None: MJDMin = 0.0\n if MJDMax == None: MJDMax = np.inf\n if bins == None: bins = \"equalSize\"\n if runFit == None: runFit = True\n\n except:\n print \"Option File not working/Nonexistent. Using default values\"\n '''",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import torch
from torch import nn
from torch.nn import functional as F
import torchvision
import math
from torchvision.models.resnet import Bottleneck
from dataset import load_image, load_text, ALPHABET, MAX_LEN
class ResNetFeatures(nn.Module):
def __init__(self, pretrained=True):
super().__init__()
# Input images x of handwritten text-lines, which might have
# arbitrary lengths, are first processed by a Convolutional
# Neural Network. We obtain an intermediate visual feature
# representation Fc of size f. We use the ResNet50 [26] as
# our backbone convolutional architecture.
# Such visual feature representation has a contextualized global view of the
# whole input image while remaining compact.
self.resnet = torchvision.models.resnet50(pretrained=pretrained)
# self.resnet.inplanes = 512
# self.layer3 = self.resnet._make_layer(Bottleneck, 256, 6, stride=1, dilate=False)
def forward(self, x):
# From https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
x = self.resnet.conv1(x.repeat(1, 3, 1, 1))
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
return x
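# Note: ResNetFeatures stops at layer3 rather than running layer4/avgpool/fc.
# For ResNet-50, layer3 emits a 1024-channel map at 1/16 of the input resolution,
# so the width axis still scales with the length of the text line; this matches
# the default f=1024 used by TransformerHTR below.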
class PositionalEncoding(nn.Module):
def __init__(self, d_model, max_len, dropout=0.1):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
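# PositionalEncoding above is the standard sinusoidal scheme:
# pe[pos, 2i] = sin(pos / 10000^(2i/d_model)) and pe[pos, 2i+1] = cos(...).
# Registering pe as a buffer saves it with the model while keeping it out of
# gradient updates.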
class TransformerHTR(nn.Module):
def __init__(self, alphabet, freeze_resnet=False, use_encoder=False, dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1, text_len=100):
super(TransformerHTR, self).__init__()
# (Visual Feature) Encoder
self.resnet = ResNetFeatures()
if freeze_resnet:
print('Freezing-resnet')
for param in self.resnet.resnet.parameters():
param.requires_grad = False
self.fc = nn.Linear(f*4, f)
self.pe_encode = PositionalEncoding(f, 140, dropout)
self.fc_bar = nn.Linear(f, f)
if use_encoder:
print('Transformer Encoder')
encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f, dropout)
self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers)
else:
print('Identity encoder')
self.transformer_encoder = nn.Identity()
self.layer_norm = nn.LayerNorm(f)
print('freeze-resnet', freeze_resnet)
print('use_encoder', use_encoder)
# (Text Transcriber) Decoder
self.ebl = nn.Embedding(dict_size, f)
self.pe_decode = PositionalEncoding(f, text_len, dropout)
decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=num_heads, dim_feedforward=f, dropout=dropout)
self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)
self.linear = nn.Linear(f, dict_size)
# General
self.f = f
self.text_len = text_len
self.alphabet = alphabet
self.inv_alphabet = {j: i for i, j in alphabet.items()}
self.init_weights()
def init_weights(self):
initrange = 0.1
self.fc.bias.data.zero_()
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc_bar.bias.data.zero_()
self.fc_bar.weight.data.uniform_(-initrange, initrange)
self.ebl.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.zero_()
self.linear.weight.data.uniform_(-initrange, initrange)
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
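    # For sz=3 the mask is [[0, -inf, -inf], [0, 0, -inf], [0, 0, 0]]: row i may
    # attend only to positions <= i, enforcing causal (left-to-right) decoding.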
def encode(self, x):
x = self.resnet(x)
b, f, h, w = x.size()
x = x.view(b, f*h, w).permute(0, 2, 1)
# x = F.relu(self.fc(x))
x = self.fc(x)
x = self.pe_encode(x.permute(1, 0, 2))
# x = F.relu(self.fc_bar(x))
x = self.fc_bar(x)
x = self.transformer_encoder(x)
# x = self.layer_norm(x)
return x
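    # Shape flow: (b, 1, H, W) -> resnet (b, 1024, h, w) -> columns (b, w, 1024*h)
    # -> fc (b, w, f) -> (w, b, f) for the transformer. This implicitly assumes
    # h == 4 after the CNN, since self.fc expects f*4 input features.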
def decode(self, x, y):
kpm = (x == self.alphabet['<P>']).transpose(1, 0)
x = self.ebl(x)*math.sqrt(self.f)
x = self.pe_decode(x)
dim = x.size()[0]
a = self.generate_square_subsequent_mask(dim).to(x.device)
x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)
return self.linear(x).permute(1, 0, 2)
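    # Teacher forcing: x holds gold token ids; the causal mask plus the <P> key
    # padding mask keep each position from attending to future or padded tokens
    # while cross-attending to the visual memory y.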
def forward(self, x, y):
return self.decode(x, self.encode(y))
@torch.no_grad()
def to_text_(self, x, bulk=True):
txt = []
p = {self.alphabet["<E>"]}
s = {self.alphabet["<S>"], self.alphabet["<P>"]}
for idx in x:
if not bulk:
if idx in p:
break
if idx in s:
continue
txt.append(self.inv_alphabet[idx])
return (txt if bulk else "".join(txt))
@torch.no_grad()
def to_text(self, x, bulk=False):
x = x.cpu().numpy()
if len(x.shape) == 2:
return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]
else:
return self.to_text_(x, bulk=bulk)
@torch.no_grad()
def gen(self, y, bulk=False):
y = self.encode(y)
output_tokens = torch.full((y.size()[1], self.text_len), self.alphabet["<P>"]).long()
output_tokens[:, 0] = self.alphabet["<S>"]
output_tokens = output_tokens.to(y.device)
for j in range(1, self.text_len):
x = output_tokens[:, :j].permute(1, 0)
x = self.decode(x, y)
a = torch.argmax(x, dim=-1)
output_tokens[:, j] = a[:,-1]
if bulk:
return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens
else:
return self.to_text(output_tokens)
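    # Greedy autoregressive decoding: the decoder is re-run on the growing prefix
    # at every step, so generation costs O(text_len) decoder passes per batch.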
# DEBUG
import os
import torchvision
import numpy as np
from torchvision.transforms.functional import resize, pil_to_tensor
import PIL
def load_batch_image(max_img=2):
    # Each batch should have shape (N, 1, H, W): images are stacked along dim 0,
    # then a channel dimension is added with unsqueeze(1).
return torch.cat([load_image(os.path.join('debug-data', f"{i}.png")) for i in range(1, max_img+1)], dim=0).unsqueeze(1)
character_dict = dict()
def get(x):
a = character_dict.get(x, None)
if a is None:
idx = len(character_dict)
character_dict[x] = idx
return idx
else:
return a
TXT = ["A|MOVE|to|stop|Mr.|Gaitskell|from", "nominating|any|more|Labour|life|Peers"]
def load_text_tensor(txt):
return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)
def load_batch_text():
return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)
if __name__ == "__main__":
# load two images
transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)
bt = load_batch_text()
print(bt.size())
    b = transformer(bt[:-1, :], load_batch_image())  # inputs are tokens[:-1]; targets below are the shifted bt[1:]
criterion = nn.CrossEntropyLoss()
loss = 0
trgt = bt[1:, :]
for i in range(trgt.size()[1]):
loss += criterion(b[i], trgt[:, i])
loss.backward()
out = transformer.gen(load_batch_image())
print(out)
|
normal
|
{
"blob_id": "79522db1316e4a25ab5a598ee035cf9b9a9a9411",
"index": 3511,
"step-1": "<mask token>\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n <mask token>\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else ''.join(txt)\n\n @torch.no_grad()\n def to_text(self, 
x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ResNetFeatures(nn.Module):\n\n def __init__(self, pretrained=True):\n super().__init__()\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n\n def forward(self, x):\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, 
a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else ''.join(txt)\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n<mask token>\n\n\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\n\n<mask token>\n\n\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ResNetFeatures(nn.Module):\n\n def __init__(self, pretrained=True):\n super().__init__()\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n\n def forward(self, x):\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, 
a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else ''.join(txt)\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n<mask token>\n\n\ndef load_batch_image(max_img=2):\n return torch.cat([load_image(os.path.join('debug-data', f'{i}.png')) for\n i in range(1, max_img + 1)], dim=0).unsqueeze(1)\n\n\n<mask token>\n\n\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\n\n<mask token>\n\n\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\n\ndef load_batch_text():\n return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)\n\n\nif __name__ == '__main__':\n transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)\n bt = load_batch_text()\n print(bt.size())\n b = transformer(bt[0:transformer.text_len, :], load_batch_image())\n criterion = nn.CrossEntropyLoss()\n loss = 0\n trgt = bt[1:, :]\n for i in range(trgt.size()[1]):\n loss += criterion(b[i], trgt[:, i])\n loss.backward()\n out = transformer.gen(load_batch_image())\n print(out)\n",
"step-4": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\nimport math\nfrom torchvision.models.resnet import Bottleneck\nfrom dataset import load_image, load_text, ALPHABET, MAX_LEN\n\n\nclass ResNetFeatures(nn.Module):\n\n def __init__(self, pretrained=True):\n super().__init__()\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n\n def forward(self, x):\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == 
self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else ''.join(txt)\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\nimport os\nimport torchvision\nimport numpy as np\nfrom torchvision.transforms.functional import resize, pil_to_tensor\nimport PIL\n\n\ndef load_batch_image(max_img=2):\n return torch.cat([load_image(os.path.join('debug-data', f'{i}.png')) for\n i in range(1, max_img + 1)], dim=0).unsqueeze(1)\n\n\ncharacter_dict = dict()\n\n\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\n\nTXT = ['A|MOVE|to|stop|Mr.|Gaitskell|from',\n 'nominating|any|more|Labour|life|Peers']\n\n\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\n\ndef load_batch_text():\n return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)\n\n\nif __name__ == '__main__':\n transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)\n bt = load_batch_text()\n print(bt.size())\n b = transformer(bt[0:transformer.text_len, :], load_batch_image())\n criterion = nn.CrossEntropyLoss()\n loss = 0\n trgt = bt[1:, :]\n for i in range(trgt.size()[1]):\n loss += criterion(b[i], trgt[:, i])\n loss.backward()\n out = transformer.gen(load_batch_image())\n print(out)\n",
"step-5": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\nimport math\nfrom torchvision.models.resnet import Bottleneck\nfrom dataset import load_image, load_text, ALPHABET, MAX_LEN\n\n\nclass ResNetFeatures(nn.Module):\n def __init__(self, pretrained=True):\n super().__init__()\n # Input images x of handwritten text-lines, which might have\n # arbitrary lengths, are first processed by a Convolutional\n # Neural Network. We obtain an intermediate visual feature\n # representation Fc of size f. We use the ResNet50 [26] as\n # our backbone convolutional architecture. \n # Such visual feature representation has a contextualized global view of the\n # whole input image while remaining compact.\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n # self.resnet.inplanes = 512\n # self.layer3 = self.resnet._make_layer(Bottleneck, 256, 6, stride=1, dilate=False)\n\n def forward(self, x):\n # From https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False, dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1, text_len=100):\n super(TransformerHTR, self).__init__()\n # (Visual Feature) Encoder\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f*4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f, dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n # (Text Transcriber) Decoder\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n\n # General\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n \n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, 
initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f*h, w).permute(0, 2, 1)\n # x = F.relu(self.fc(x))\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n # x = F.relu(self.fc_bar(x))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n # x = self.layer_norm(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x)*math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet[\"<E>\"]}\n s = {self.alphabet[\"<S>\"], self.alphabet[\"<P>\"]}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return (txt if bulk else \"\".join(txt))\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.alphabet[\"<P>\"]).long()\n output_tokens[:, 0] = self.alphabet[\"<S>\"]\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:,-1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n\n# DEBUG\nimport os\nimport torchvision\nimport numpy as np\nfrom torchvision.transforms.functional import resize, pil_to_tensor\nimport PIL\n\ndef load_batch_image(max_img=2):\n # Each batch should have \n return torch.cat([load_image(os.path.join('debug-data', f\"{i}.png\")) for i in range(1, max_img+1)], dim=0).unsqueeze(1)\n\ncharacter_dict = dict()\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\nTXT = [\"A|MOVE|to|stop|Mr.|Gaitskell|from\", \"nominating|any|more|Labour|life|Peers\"]\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\ndef load_batch_text():\n return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)\n\nif __name__ == \"__main__\":\n # load two images\n transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)\n bt = load_batch_text()\n print(bt.size())\n b = transformer(bt[0:transformer.text_len, :], load_batch_image())\n criterion = nn.CrossEntropyLoss()\n loss = 0\n trgt = bt[1:, :]\n for i in range(trgt.size()[1]):\n loss += criterion(b[i], trgt[:, i])\n loss.backward()\n out = transformer.gen(load_batch_image())\n print(out)",
"step-ids": [
12,
18,
21,
23,
24
]
}
|
[
12,
18,
21,
23,
24
] |
import time
import optparse
from scapy.all import *
from IPy import IP as IPTEST
ttlValues = {}
THRESH = 5
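# Heuristic: compare the TTL observed in sniffed packets against the TTL of a
# reply actively solicited from the claimed source. A gap larger than THRESH
# hops suggests the source address is spoofed, since the spoofer's real path
# length rarely matches the impersonated host's.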
def checkTTL(ipsrc,ttl):
if IPTEST(ipsrc).iptype() == 'PRIVATE':
return
    if ipsrc not in ttlValues:
        # Probe the claimed source once to learn the TTL of a genuine reply.
        pkt = sr1(IP(dst=ipsrc) / ICMP(), retry=0, timeout=1, verbose=0)
        if pkt is None:
            return
        ttlValues[ipsrc] = pkt.ttl
    if abs(int(ttl) - int(ttlValues[ipsrc])) > THRESH:
        print '\n[!] Detected Possible Spoofed Packet From: ' + ipsrc
        print '[!] TTL: ' + ttl + ', Actual TTL: ' + str(ttlValues[ipsrc])
def testTTL(pkt):
try:
if pkt.haslayer(IP):
ipsrc = pkt.getlayer(IP).src
ttl = str(pkt.ttl)
checkTTL(ipsrc,ttl)
except:
pass
def main():
    global THRESH
    parser = optparse.OptionParser("usage: %prog -i <interface> -t <thresh>")
parser.add_option('-i',dest='iface',type='string',help='specify network interface')
parser.add_option('-t',dest='thresh',type='int',help='specify threshold count')
(options,args) = parser.parse_args()
    if options.iface is None:
        conf.iface = 'eth0'
    else:
        conf.iface = options.iface
    if options.thresh is not None:
        THRESH = options.thresh
    else:
        THRESH = 5
sniff(prn=testTTL,store=0)
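# Example invocation (sniffing generally requires root; the script name is
# illustrative only):
#   sudo python spoof_detector.py -i eth0 -t 5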
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "7081211336793bfde60b5c922f6ab9461a475949",
"index": 1616,
"step-1": "import time\r\nimport optparse\r\nfrom IPy import IP as IPTEST\r\nttlValues = {}\r\nTHRESH = 5\r\ndef checkTTL(ipsrc,ttl):\r\n if IPTEST(ipsrc).iptype() == 'PRIVATE':\r\n return\r\n if not ttlValues.has_key(ipsrc):\r\n pkt = srl(IP(dst=ipsrc) / TCMP(),retry=0,timeout=0,verbose=0)\r\n ttlValues[ipsrc] = pkt.ttl\r\n if abs(int(ttl) - int(ttlValues[ipsrc])) > THRESH:\r\n print '\\n[!] Detected Possible Spoofed Packer From:'+ipsrc\r\n print '[!] TTL:'+ttl+',Actual TTL:'+str(ttlVaules[ipsrc])\r\ndef testTTL(pkt):\r\n try:\r\n if pkt.haslayer(IP):\r\n ipsrc = pkt.getlayer(IP).src\r\n ttl = str(pkt.ttl)\r\n checkTTL(ipsrc,ttl)\r\n except:\r\n pass\r\ndef main():\r\n parser = optparse.OptionParser(\"usage%prog\"+\"-i<interface> -t<thresh>\")\r\n parser.add_option('-i',dest='iface',type='string',help='specify network interface')\r\n parser.add_option('-t',dest='thresh',type='int',help='specify threshold count')\r\n (options,args) = parser.parse_args()\r\n if options.iface == None:\r\n conf.iface = 'eth0'\r\n else:\r\n conf.iface = options.iface\r\n if options.thresh != None:\r\n THRESH = options.thresh\r\n else:\r\n THRESH = 5\r\n sniff(prn=testTTL,store=0)\r\nif __name__ == '__main__':\r\n main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from collections import defaultdict
from mask import Mask
from utils import bits_to_decimal
def get_program(filename):
program = []
mask = None
with open(filename, 'r') as f:
for line in f:
line = line[:-1]
if 'mask' in line:
if mask is not None:
program.append(mask)
mask = Mask(line)
elif 'mem' in line:
mask.add_mem(line)
program.append(mask)
return program
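# Input lines look like "mask = X1XX0..." or "mem[8] = 11"; each Mask object
# (from the local mask module) groups one mask with the mem writes that follow
# it, until the next mask line starts a new group.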
def run_program_v1(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
address = mem[0]
bits = mem[1]
masked = mask.apply_v1(bits)
addresses[address] = masked
return addresses
def run_program_v2(program):
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
subaddresses = mask.apply_v2(mem)
addresses.update(subaddresses)
return addresses
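# Per the local mask module's API: version 1 masks the *value* written to a
# single address (apply_v1 on the value bits), while version 2 masks the
# *address*, where floating mask bits expand one write into many addresses
# (hence apply_v2 returns a dict of subaddresses merged via update()).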
if __name__ == "__main__":
program = get_program('input.txt')
addresses_v1 = run_program_v1(program)
part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])
print(f'Part One Answer: {part_one}')
addresses_v2 = run_program_v2(program)
part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])
print(f'Part Two Answer: {part_two}')
|
normal
|
{
"blob_id": "56e8cdec854b3b7a2f925e70d7d59a73b76f9952",
"index": 9340,
"step-1": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == '__main__':\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-4": "from collections import defaultdict\nfrom mask import Mask\nfrom utils import bits_to_decimal\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == '__main__':\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-5": "from collections import defaultdict\n\nfrom mask import Mask\nfrom utils import bits_to_decimal\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == \"__main__\":\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
#Sample Python Code
print("Different Code!!!")
#print("Hello World!")
|
normal
|
{
"blob_id": "1e24952006afebb7bf10a83077fc4effd5cc9c58",
"index": 1301,
"step-1": "<mask token>\n",
"step-2": "print('Different Code!!!')\n",
"step-3": "#Sample Python Code\nprint(\"Different Code!!!\")\n#print(\"Hello World!\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from math import *
import math
import re
import numpy as np
class atom:
aid=0
atype=''
x=0.0
y=0.0
z=0.0
rid=0
rtype=''
model=[]
chainid=''
def getlen(atm1,atm2):
dist=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2))
return dist
def getangle(atm1,atm2,atm3):
dist1=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2))
dist2=sqrt(pow(atm3.x-atm2.x,2)+pow(atm3.y-atm2.y,2)+pow(atm3.z-atm2.z,2))
dotp=(atm1.x-atm2.x)*(atm3.x-atm2.x)+(atm1.y-atm2.y)*(atm3.y-atm2.y)+(atm1.z-atm2.z)*(atm3.z-atm2.z)
angle=acos(dotp/(dist1*dist2))*180/pi
return angle
def getangledihedral(atm1,atm2,atm3,atm4):
ab=np.zeros(3)
bc=np.zeros(3)
cd=np.zeros(3)
p=[]
q=[]
ab[0]=atm2.x-atm1.x
ab[1]=atm2.y-atm1.y
ab[2]=atm2.z-atm1.z
bc[0]=atm3.x-atm2.x
bc[1]=atm3.y-atm2.y
bc[2]=atm3.z-atm2.z
cd[0]=atm4.x-atm3.x
cd[1]=atm4.y-atm3.y
cd[2]=atm4.z-atm3.z
p.append(ab[1]*bc[2]-ab[2]*bc[1])
p.append(ab[2]*bc[0]-ab[0]*bc[2])
p.append(ab[0]*bc[1]-ab[1]*bc[0])
q.append(bc[1]*cd[2]-bc[2]*cd[1])
q.append(bc[2]*cd[0]-bc[0]*cd[2])
q.append(bc[0]*cd[1]-bc[1]*cd[0])
r1=0
r2=0
dp=0
dpcd=0
for i in range(0,3):
r1 += math.pow(p[i],2)
r2 += math.pow(q[i],2)
dp += p[i]*q[i]
dpcd += p[i]*cd[i]
dih=(dpcd/abs(dpcd))*math.acos(dp/(math.sqrt(r1)*math.sqrt(r2)))*180/math.pi
return dih
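# p = AB x BC and q = BC x CD are normals of the ABC and BCD planes; the
# dihedral is the angle between them, with its sign taken from the direction
# of p relative to CD.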
def getdihedralstrain(a1,a2,a3,a4,a5):
	dse = (8.37*(1+math.cos(3*a1*math.pi/180)) + 8.37*(1+math.cos(3*a5*math.pi/180))
	       + 4.18*(1+math.cos(3*a2*math.pi/180)) + 4.18*(1+math.cos(3*a4*math.pi/180))
	       + 14.64*(1+math.cos(2*a3*math.pi/180)) + 2.51*(1+math.cos(3*a3*math.pi/180)))
return dse
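# This matches the commonly used disulfide dihedral strain energy expression
# (values in kJ/mol; 8.37/4.18/14.64/2.51 kJ/mol correspond to roughly
# 2.0/1.0/3.5/0.6 kcal/mol) over the five torsions chi1, chi2, chi3, chi2',
# chi1' of the N-CA-CB-SG-SG'-CB'-CA'-N' linkage.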
s_s_l=1.6
s_s_u=2.5
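# Accept S-S distances between 1.6 and 2.5 Angstroms; a covalent disulfide
# bond is typically about 2.05 Angstroms, so this window brackets real bonds
# generously while excluding mere contacts.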
filetxt=open('filelist.txt')
txt_lines=filetxt.read().split('\n')
filetxt.close()
fileout=open('out_C-S-S-C_BACKBONE_scan.txt','w')
f1=open('error_C-S-S-C_scan.txt','w')
intr=[]
lenlines=len(txt_lines)
for ppp in range(lenlines):
filename=txt_lines[ppp]
if filename=='':
continue
print('%.2f'%((ppp+1)*100.0/(lenlines-1))+'% ('+str(ppp+1)+'/'+str(lenlines-1)+') Executing for:'+filename)
file=open(filename,'r')
lines=file.read().split('\n')
file.close()
T=[]
D=[]
S=[]
C=[]
SX=[]
TX=[]
A=[]
B=[]
E=[]
F=[]
modelno=[]
try:
for ln in lines:
if len(ln)>=6 and (ln[0:4]=='ATOM' or ln[0:6]=='HETATM'):
atm=atom()
atm.aid=int(ln[6:11])
atm.atype=ln[12:16].strip()
atm.rtype=ln[17:20].strip()
atm.chainid=ln[21]
atm.rid=int(ln[22:26])
atm.x=float(ln[30:38])
atm.y=float(ln[38:46])
				atm.z=float(ln[46:54]) # PDB z occupies character columns 47-54, i.e. slice [46:54]
atm.model=modelno
symb=ln[13].strip()
if atm.atype=='CB' and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS' :
C.append(atm)
D.append(atm)
if atm.atype=='SG'and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS':
SX.append(atm)
TX.append(atm)
if atm.atype=='CA' and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS':
B.append(atm)
E.append(atm)
if atm.atype=='N' and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS' :
A.append(atm)
F.append(atm)
elif len(ln)>=5 and ln[0:5]=='MODEL':
modelno=int(ln[12:])
except:
f1.write(filename+'\n')
for k in SX:
for k1 in SX:
if k1.chainid==k.chainid:
if k1.rid==k.rid and k1.aid!=k.aid :
break
else:
S.append(k)
for m in TX:
for m1 in TX:
if m1.chainid==m.chainid:
if m1.rid==m.rid and m1.aid!=m.aid :
break
else:
T.append(m)
for a in range(len(A)):
for b in range(len(B)):
if A[a].rid==B[b].rid:
for j in range(len(C)):
for k in range(len(S)):
if C[j].rid==S[k].rid and C[j].rid==B[b].rid and C[j].chainid==B[b].chainid==S[k].chainid==A[a].chainid :
for m in range(len(T)):
if getlen(S[k],T[m])>=s_s_l and getlen(S[k],T[m])<=s_s_u and S[k].rid<T[m].rid :
for n in range(len(D)):
for e in range(len(E)):
if E[e].rid==D[n].rid:
for f in range(len(F)):
if D[n].rid==T[m].rid and E[e].rid==F[f].rid and D[n].chainid==T[m].chainid==E[e].chainid==F[f].chainid :
a1=getangledihedral(A[a],B[b],C[j],S[k])
a2=getangledihedral(B[b],C[j],S[k],T[m])
a3=getangledihedral(C[j],S[k],T[m],D[n])
a4=getangledihedral(S[k],T[m],D[n],E[e])
a5=getangledihedral(T[m],D[n],E[e],F[f])
dse=getdihedralstrain(a1,a2,a3,a4,a5)
intr.append([])
intr[len(intr)-1].append(filename)
intr[len(intr)-1].append(C[j].chainid)
intr[len(intr)-1].append(C[j].rid)
intr[len(intr)-1].append(T[m].rid)
intr[len(intr)-1].append(T[m].chainid)
intr[len(intr)-1].append(getlen(C[j],S[k]))
intr[len(intr)-1].append(getlen(T[m],S[k]))
intr[len(intr)-1].append(getlen(T[m],D[n]))
intr[len(intr)-1].append(a1)
intr[len(intr)-1].append(a2)
intr[len(intr)-1].append(a3)
intr[len(intr)-1].append(a4)
intr[len(intr)-1].append(a5)
intr[len(intr)-1].append(dse)
C=[]
T=[]
D=[]
S=[]
SX=[]
TX=[]
A=[]
B=[]
E=[]
F=[]
for line in intr:
for xxd in line:
fileout.write(str(xxd))
fileout.write('\t')
fileout.write('\n')
intr=[]
	fileout.close()
	fileout=open('out_C-S-S-C_BACKBONE_scan.txt','a') # reopen in append mode so results are flushed after every file
fileout.close()
f1.close()
|
normal
|
{
"blob_id": "78123c806e5a8c0cc7511a5024769f8c61621efa",
"index": 9877,
"step-1": "<mask token>\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\n<mask token>\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\ndef getangle(atm1, atm2, atm3):\n dist1 = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n dist2 = sqrt(pow(atm3.x - atm2.x, 2) + pow(atm3.y - atm2.y, 2) + pow(\n atm3.z - atm2.z, 2))\n dotp = (atm1.x - atm2.x) * (atm3.x - atm2.x) + (atm1.y - atm2.y) * (atm3\n .y - atm2.y) + (atm1.z - atm2.z) * (atm3.z - atm2.z)\n angle = acos(dotp / (dist1 * dist2)) * 180 / pi\n return angle\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\ndef getangle(atm1, atm2, atm3):\n dist1 = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n dist2 = sqrt(pow(atm3.x - atm2.x, 2) + pow(atm3.y - atm2.y, 2) + pow(\n atm3.z - atm2.z, 2))\n dotp = (atm1.x - atm2.x) * (atm3.x - atm2.x) + (atm1.y - atm2.y) * (atm3\n .y - atm2.y) + (atm1.z - atm2.z) * (atm3.z - atm2.z)\n angle = acos(dotp / (dist1 * dist2)) * 180 / pi\n return angle\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\ns_s_l = 1.6\ns_s_u = 2.5\nfiletxt = open('filelist.txt')\ntxt_lines = filetxt.read().split('\\n')\nfiletxt.close()\nfileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'w')\nf1 = open('error_C-S-S-C_scan.txt', 'w')\nintr = []\nlenlines = len(txt_lines)\nfor ppp in range(lenlines):\n filename = txt_lines[ppp]\n if filename == '':\n continue\n print('%.2f' % ((ppp + 1) * 100.0 / (lenlines - 1)) + '% (' + str(ppp +\n 1) + '/' + str(lenlines - 1) + ') Executing for:' + filename)\n file = open(filename, 'r')\n lines = file.read().split('\\n')\n file.close()\n T = []\n D = []\n S = []\n C = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n modelno = []\n try:\n for ln in lines:\n if len(ln) >= 6 and (ln[0:4] == 'ATOM' or ln[0:6] == 'HETATM'):\n atm = atom()\n atm.aid = int(ln[6:11])\n atm.atype = ln[12:16].strip()\n atm.rtype = ln[17:20].strip()\n atm.chainid = ln[21]\n atm.rid = int(ln[22:26])\n atm.x = float(ln[30:38])\n atm.y = float(ln[38:46])\n atm.z = float(ln[47:54])\n atm.model = modelno\n symb = ln[13].strip()\n if atm.atype == 'CB' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n C.append(atm)\n D.append(atm)\n if atm.atype == 'SG' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n SX.append(atm)\n TX.append(atm)\n if atm.atype == 'CA' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n B.append(atm)\n E.append(atm)\n if atm.atype == 'N' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n A.append(atm)\n F.append(atm)\n elif len(ln) >= 5 
and ln[0:5] == 'MODEL':\n modelno = int(ln[12:])\n except:\n f1.write(filename + '\\n')\n for k in SX:\n for k1 in SX:\n if k1.chainid == k.chainid:\n if k1.rid == k.rid and k1.aid != k.aid:\n break\n else:\n S.append(k)\n for m in TX:\n for m1 in TX:\n if m1.chainid == m.chainid:\n if m1.rid == m.rid and m1.aid != m.aid:\n break\n else:\n T.append(m)\n for a in range(len(A)):\n for b in range(len(B)):\n if A[a].rid == B[b].rid:\n for j in range(len(C)):\n for k in range(len(S)):\n if C[j].rid == S[k].rid and C[j].rid == B[b].rid and C[\n j].chainid == B[b].chainid == S[k].chainid == A[a\n ].chainid:\n for m in range(len(T)):\n if getlen(S[k], T[m]) >= s_s_l and getlen(S\n [k], T[m]) <= s_s_u and S[k].rid < T[m\n ].rid:\n for n in range(len(D)):\n for e in range(len(E)):\n if E[e].rid == D[n].rid:\n for f in range(len(F)):\n if D[n].rid == T[m].rid and E[e].rid == F[f\n ].rid and D[n].chainid == T[m\n ].chainid == E[e].chainid == F[f\n ].chainid:\n a1 = getangledihedral(A[a], B[b], C[j],\n S[k])\n a2 = getangledihedral(B[b], C[j], S[k],\n T[m])\n a3 = getangledihedral(C[j], S[k], T[m],\n D[n])\n a4 = getangledihedral(S[k], T[m], D[n],\n E[e])\n a5 = getangledihedral(T[m], D[n], E[e],\n F[f])\n dse = (getdihedralstrain(a1, a2, a3, a4,\n a5))\n intr.append([])\n intr[len(intr) - 1].append(filename)\n intr[len(intr) - 1].append(C[j].chainid)\n intr[len(intr) - 1].append(C[j].rid)\n intr[len(intr) - 1].append(T[m].rid)\n intr[len(intr) - 1].append(T[m].chainid)\n intr[len(intr) - 1].append(getlen(C[j],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n D[n]))\n intr[len(intr) - 1].append(a1)\n intr[len(intr) - 1].append(a2)\n intr[len(intr) - 1].append(a3)\n intr[len(intr) - 1].append(a4)\n intr[len(intr) - 1].append(a5)\n intr[len(intr) - 1].append(dse)\n C = []\n T = []\n D = []\n S = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n for line in intr:\n for xxd in line:\n fileout.write(str(xxd))\n fileout.write('\\t')\n fileout.write('\\n')\n intr = []\n fileout.close()\n fileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'a')\nfileout.close()\nf1.close()\n",
"step-4": "from math import *\nimport math\nimport re\nimport numpy as np\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\ndef getangle(atm1, atm2, atm3):\n dist1 = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n dist2 = sqrt(pow(atm3.x - atm2.x, 2) + pow(atm3.y - atm2.y, 2) + pow(\n atm3.z - atm2.z, 2))\n dotp = (atm1.x - atm2.x) * (atm3.x - atm2.x) + (atm1.y - atm2.y) * (atm3\n .y - atm2.y) + (atm1.z - atm2.z) * (atm3.z - atm2.z)\n angle = acos(dotp / (dist1 * dist2)) * 180 / pi\n return angle\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\ns_s_l = 1.6\ns_s_u = 2.5\nfiletxt = open('filelist.txt')\ntxt_lines = filetxt.read().split('\\n')\nfiletxt.close()\nfileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'w')\nf1 = open('error_C-S-S-C_scan.txt', 'w')\nintr = []\nlenlines = len(txt_lines)\nfor ppp in range(lenlines):\n filename = txt_lines[ppp]\n if filename == '':\n continue\n print('%.2f' % ((ppp + 1) * 100.0 / (lenlines - 1)) + '% (' + str(ppp +\n 1) + '/' + str(lenlines - 1) + ') Executing for:' + filename)\n file = open(filename, 'r')\n lines = file.read().split('\\n')\n file.close()\n T = []\n D = []\n S = []\n C = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n modelno = []\n try:\n for ln in lines:\n if len(ln) >= 6 and (ln[0:4] == 'ATOM' or ln[0:6] == 'HETATM'):\n atm = atom()\n atm.aid = int(ln[6:11])\n atm.atype = ln[12:16].strip()\n atm.rtype = ln[17:20].strip()\n atm.chainid = ln[21]\n atm.rid = int(ln[22:26])\n atm.x = float(ln[30:38])\n atm.y = float(ln[38:46])\n atm.z = float(ln[47:54])\n atm.model = modelno\n symb = ln[13].strip()\n if atm.atype == 'CB' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n C.append(atm)\n D.append(atm)\n if atm.atype == 'SG' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n SX.append(atm)\n TX.append(atm)\n if atm.atype == 'CA' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n B.append(atm)\n E.append(atm)\n if atm.atype == 'N' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n 
A.append(atm)\n F.append(atm)\n elif len(ln) >= 5 and ln[0:5] == 'MODEL':\n modelno = int(ln[12:])\n except:\n f1.write(filename + '\\n')\n for k in SX:\n for k1 in SX:\n if k1.chainid == k.chainid:\n if k1.rid == k.rid and k1.aid != k.aid:\n break\n else:\n S.append(k)\n for m in TX:\n for m1 in TX:\n if m1.chainid == m.chainid:\n if m1.rid == m.rid and m1.aid != m.aid:\n break\n else:\n T.append(m)\n for a in range(len(A)):\n for b in range(len(B)):\n if A[a].rid == B[b].rid:\n for j in range(len(C)):\n for k in range(len(S)):\n if C[j].rid == S[k].rid and C[j].rid == B[b].rid and C[\n j].chainid == B[b].chainid == S[k].chainid == A[a\n ].chainid:\n for m in range(len(T)):\n if getlen(S[k], T[m]) >= s_s_l and getlen(S\n [k], T[m]) <= s_s_u and S[k].rid < T[m\n ].rid:\n for n in range(len(D)):\n for e in range(len(E)):\n if E[e].rid == D[n].rid:\n for f in range(len(F)):\n if D[n].rid == T[m].rid and E[e].rid == F[f\n ].rid and D[n].chainid == T[m\n ].chainid == E[e].chainid == F[f\n ].chainid:\n a1 = getangledihedral(A[a], B[b], C[j],\n S[k])\n a2 = getangledihedral(B[b], C[j], S[k],\n T[m])\n a3 = getangledihedral(C[j], S[k], T[m],\n D[n])\n a4 = getangledihedral(S[k], T[m], D[n],\n E[e])\n a5 = getangledihedral(T[m], D[n], E[e],\n F[f])\n dse = (getdihedralstrain(a1, a2, a3, a4,\n a5))\n intr.append([])\n intr[len(intr) - 1].append(filename)\n intr[len(intr) - 1].append(C[j].chainid)\n intr[len(intr) - 1].append(C[j].rid)\n intr[len(intr) - 1].append(T[m].rid)\n intr[len(intr) - 1].append(T[m].chainid)\n intr[len(intr) - 1].append(getlen(C[j],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n D[n]))\n intr[len(intr) - 1].append(a1)\n intr[len(intr) - 1].append(a2)\n intr[len(intr) - 1].append(a3)\n intr[len(intr) - 1].append(a4)\n intr[len(intr) - 1].append(a5)\n intr[len(intr) - 1].append(dse)\n C = []\n T = []\n D = []\n S = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n for line in intr:\n for xxd in line:\n fileout.write(str(xxd))\n fileout.write('\\t')\n fileout.write('\\n')\n intr = []\n fileout.close()\n fileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'a')\nfileout.close()\nf1.close()\n",
"step-5": "from math import *\nimport math\nimport re\nimport numpy as np\nclass atom:\n aid=0 \n atype='' \n x=0.0 \n y=0.0 \n z=0.0 \n rid=0 \n rtype='' \n model=[]\n chainid=''\n\ndef getlen(atm1,atm2):\n dist=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2)) \n return dist\n\ndef getangle(atm1,atm2,atm3):\n dist1=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2)) \n dist2=sqrt(pow(atm3.x-atm2.x,2)+pow(atm3.y-atm2.y,2)+pow(atm3.z-atm2.z,2)) \n dotp=(atm1.x-atm2.x)*(atm3.x-atm2.x)+(atm1.y-atm2.y)*(atm3.y-atm2.y)+(atm1.z-atm2.z)*(atm3.z-atm2.z) \n angle=acos(dotp/(dist1*dist2))*180/pi \n return angle\n\ndef getangledihedral(atm1,atm2,atm3,atm4):\n ab=np.zeros(3)\n bc=np.zeros(3)\n cd=np.zeros(3)\n p=[]\n q=[]\n ab[0]=atm2.x-atm1.x\n ab[1]=atm2.y-atm1.y\n ab[2]=atm2.z-atm1.z\n bc[0]=atm3.x-atm2.x\n bc[1]=atm3.y-atm2.y\n bc[2]=atm3.z-atm2.z\n cd[0]=atm4.x-atm3.x\n cd[1]=atm4.y-atm3.y\n cd[2]=atm4.z-atm3.z\n p.append(ab[1]*bc[2]-ab[2]*bc[1])\n p.append(ab[2]*bc[0]-ab[0]*bc[2])\n p.append(ab[0]*bc[1]-ab[1]*bc[0])\n q.append(bc[1]*cd[2]-bc[2]*cd[1])\n q.append(bc[2]*cd[0]-bc[0]*cd[2])\n q.append(bc[0]*cd[1]-bc[1]*cd[0])\n\n\n r1=0\n r2=0\n dp=0\n dpcd=0\n for i in range(0,3):\n r1 += math.pow(p[i],2)\n r2 += math.pow(q[i],2)\n dp += p[i]*q[i]\n dpcd += p[i]*cd[i]\n\n dih=(dpcd/abs(dpcd))*math.acos(dp/(math.sqrt(r1)*math.sqrt(r2)))*180/math.pi\n \n\n return dih\n\ndef getdihedralstrain(a1,a2,a3,a4,a5):\n dse=8.37*(1+math.cos(3*a1*math.pi/180))+8.37*(1+math.cos(3*a5*math.pi/180))+4.18*(1+math.cos(3*a2*math.pi/180))+4.18*(1+math.cos(3*a4*math.pi/180))+14.64*(1+math.cos(2*a3*math.pi/180))+2.51*(1+math.cos(3*a3*math.pi/180))\n return dse\n\ns_s_l=1.6\ns_s_u=2.5\n\nfiletxt=open('filelist.txt') \ntxt_lines=filetxt.read().split('\\n') \nfiletxt.close()\nfileout=open('out_C-S-S-C_BACKBONE_scan.txt','w')\nf1=open('error_C-S-S-C_scan.txt','w')\nintr=[]\nlenlines=len(txt_lines)\nfor ppp in range(lenlines):\n filename=txt_lines[ppp]\n if filename=='':\n continue\n print('%.2f'%((ppp+1)*100.0/(lenlines-1))+'% ('+str(ppp+1)+'/'+str(lenlines-1)+') Executing for:'+filename)\n file=open(filename,'r')\n lines=file.read().split('\\n')\n file.close()\n T=[]\n D=[]\n S=[] \n C=[]\n SX=[]\n TX=[]\n A=[]\n B=[]\n E=[]\n F=[]\n modelno=[]\n\n \n try:\n for ln in lines:\n if len(ln)>=6 and (ln[0:4]=='ATOM' or ln[0:6]=='HETATM'):\n atm=atom()\n atm.aid=int(ln[6:11]) \n atm.atype=ln[12:16].strip() \n atm.rtype=ln[17:20].strip() \n atm.chainid=ln[21]\n atm.rid=int(ln[22:26]) \n atm.x=float(ln[30:38]) \n atm.y=float(ln[38:46]) \n atm.z=float(ln[47:54]) \n atm.model=modelno\n symb=ln[13].strip()\n if atm.atype=='CB' and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS' : \n C.append(atm)\n D.append(atm)\n if atm.atype=='SG'and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS': \n SX.append(atm)\n TX.append(atm)\n if atm.atype=='CA' and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS':\n B.append(atm)\n E.append(atm)\n if atm.atype=='N' and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS' :\n A.append(atm)\n F.append(atm)\n elif len(ln)>=5 and ln[0:5]=='MODEL':\n modelno=int(ln[12:])\n\n except:\n f1.write(filename+'\\n')\n\n\n for k in SX:\n for k1 in SX:\n if k1.chainid==k.chainid: \n if k1.rid==k.rid and k1.aid!=k.aid :\n break\n else:\n S.append(k)\n for m in TX:\n for m1 in TX:\n if m1.chainid==m.chainid:\n if m1.rid==m.rid and m1.aid!=m.aid :\n break\n else:\n T.append(m)\n \n for a in range(len(A)):\n for b in 
range(len(B)):\n if A[a].rid==B[b].rid:\n for j in range(len(C)):\n for k in range(len(S)):\n if C[j].rid==S[k].rid and C[j].rid==B[b].rid and C[j].chainid==B[b].chainid==S[k].chainid==A[a].chainid :\n for m in range(len(T)):\n if getlen(S[k],T[m])>=s_s_l and getlen(S[k],T[m])<=s_s_u and S[k].rid<T[m].rid :\n for n in range(len(D)):\n for e in range(len(E)):\n if E[e].rid==D[n].rid:\n for f in range(len(F)):\n if D[n].rid==T[m].rid and E[e].rid==F[f].rid and D[n].chainid==T[m].chainid==E[e].chainid==F[f].chainid :\n a1=getangledihedral(A[a],B[b],C[j],S[k])\n a2=getangledihedral(B[b],C[j],S[k],T[m])\n a3=getangledihedral(C[j],S[k],T[m],D[n])\n a4=getangledihedral(S[k],T[m],D[n],E[e])\n a5=getangledihedral(T[m],D[n],E[e],F[f])\n dse=getdihedralstrain(a1,a2,a3,a4,a5)\n intr.append([])\n intr[len(intr)-1].append(filename) \n intr[len(intr)-1].append(C[j].chainid)\n intr[len(intr)-1].append(C[j].rid) \n intr[len(intr)-1].append(T[m].rid)\n intr[len(intr)-1].append(T[m].chainid)\n intr[len(intr)-1].append(getlen(C[j],S[k])) \n intr[len(intr)-1].append(getlen(T[m],S[k])) \n intr[len(intr)-1].append(getlen(T[m],D[n])) \n intr[len(intr)-1].append(a1)\n intr[len(intr)-1].append(a2)\n intr[len(intr)-1].append(a3)\n intr[len(intr)-1].append(a4)\n intr[len(intr)-1].append(a5)\n intr[len(intr)-1].append(dse)\n\n \n C=[]\n T=[]\n D=[]\n S=[]\n SX=[]\n TX=[]\n A=[]\n B=[]\n E=[]\n F=[]\n for line in intr:\n for xxd in line:\n fileout.write(str(xxd))\n fileout.write('\\t')\n fileout.write('\\n')\n intr=[]\n fileout.close()\n fileout=open('out_C-S-S-C_BACKBONE_scan.txt','a')\nfileout.close()\nf1.close()\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
#!/usr/bin/env python
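# Upload an image to imgur.com via the local postimg helper and copy the
# resulting link (optionally wrapped as GitHub/Reddit/HTML markup) to the
# clipboard.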
from postimg import postimg
import argparse
import pyperclip
import json
def main(args):
if not args.quiet:
print("Uploading.....")
resp = postimg.Imgur(args.img_path).upload()
if not resp['success']:
if not args.quiet:
print(json.dumps(resp, sort_keys=True, indent=4, separators=(',', ': ')))
print("Unable to upload !!!")
return None
link = resp['data']['link']
if args.github:
        link = '![Imgur](%s)'%link  # assumed GitHub markdown image embed
elif args.reddit:
link = '[Reddit](%s)'%link
elif args.html:
link = '<img src="%s" alt="snap">'%link
pyperclip.copy(link)
print(link)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Post/upload image on imgur.com', epilog='link will automatically copied to clipboard')
parser.add_argument('img_path', type=str, help='image path of file')
parser.add_argument('--github', action='store_true', help='Github markdown code of imgur url')
parser.add_argument('--html', action='store_true', help='html <img> code of imgur url')
parser.add_argument('--reddit', action='store_true', help='reddit markdown code of imgur url')
parser.add_argument('-q','--quiet', action='store_true', help='print only img url without verbose output')
args = parser.parse_args()
try:
main(args)
except KeyboardInterrupt:
print("Error: Interrupted by user!!")
|
normal
|
{
"blob_id": "705755340eef72470fc982ebd0004456469d23e4",
"index": 4859,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Post/upload image on imgur.com', epilog=\n 'link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help=\n 'Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help=\n 'html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help=\n 'reddit markdown code of imgur url')\n parser.add_argument('-q', '--quiet', action='store_true', help=\n 'print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print('Error: Interrupted by user!!')\n",
"step-4": "from postimg import postimg\nimport argparse\nimport pyperclip\nimport json\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Post/upload image on imgur.com', epilog=\n 'link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help=\n 'Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help=\n 'html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help=\n 'reddit markdown code of imgur url')\n parser.add_argument('-q', '--quiet', action='store_true', help=\n 'print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print('Error: Interrupted by user!!')\n",
"step-5": "#!/usr/bin/env python\nfrom postimg import postimg\nimport argparse\nimport pyperclip\nimport json\ndef main(args):\n if not args.quiet:\n print(\"Uploading.....\")\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(',', ': ')))\n print(\"Unable to upload !!!\")\n return None\n link = resp['data']['link']\n if args.github:\n link = ''%link\n elif args.reddit:\n link = '[Reddit](%s)'%link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">'%link\n pyperclip.copy(link)\n print(link)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Post/upload image on imgur.com', epilog='link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help='Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help='html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help='reddit markdown code of imgur url')\n parser.add_argument('-q','--quiet', action='store_true', help='print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print(\"Error: Interrupted by user!!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(sql)
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
db.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = pymysql.connect(host='localhost', port=3306, user='root', password=
'Wubaba950823', database='mydb', charset='utf8mb4')
cursor = db.cursor()
sql = "INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')" % ('test3',
'经典', '2019/12/14')
print(sql)
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
db.close()
<|reserved_special_token_1|>
import pymysql
db = pymysql.connect(host='localhost', port=3306, user='root', password=
'Wubaba950823', database='mydb', charset='utf8mb4')
cursor = db.cursor()
sql = "INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')" % ('test3',
'经典', '2019/12/14')
print(sql)
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
db.close()
<|reserved_special_token_1|>
import pymysql
db= pymysql.connect(host = 'localhost',
port = 3306,
user = 'root',
password = 'Wubaba950823',
database = 'mydb',
charset = 'utf8mb4'
)
# use cursor() to get a cursor for issuing statements
cursor = db.cursor()
# SQL INSERT statement; the formatted values must match the column types
sql = "INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')" % ('test3','经典','2019/12/14')
print(sql)
try:
    # execute the SQL statement
    cursor.execute(sql)
    # commit the transaction
    db.commit()
except:
    # roll back on error
    db.rollback()
# close the database connection
db.close()
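# note: building SQL with % formatting is injection-prone; passing parameters
# via cursor.execute(sql, args) is the safer pattern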
|
flexible
|
{
"blob_id": "8566e30a6450a72a0e441155321bd03363944b5a",
"index": 8236,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n",
"step-3": "<mask token>\ndb = pymysql.connect(host='localhost', port=3306, user='root', password=\n 'Wubaba950823', database='mydb', charset='utf8mb4')\ncursor = db.cursor()\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3',\n '经典', '2019/12/14')\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n",
"step-4": "import pymysql\ndb = pymysql.connect(host='localhost', port=3306, user='root', password=\n 'Wubaba950823', database='mydb', charset='utf8mb4')\ncursor = db.cursor()\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3',\n '经典', '2019/12/14')\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n",
"step-5": "import pymysql\n\ndb= pymysql.connect(host = 'localhost',\n port = 3306,\n user = 'root',\n password = 'Wubaba950823',\n database = 'mydb',\n charset = 'utf8mb4'\n )\n \n# 使用cursor()方法获取操作游标 \ncursor = db.cursor()\n\n# SQL 插入语句 里面的数据类型要对应\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3','经典','2019/12/14')\nprint(sql)\ntry:\n # 执行sql语句\n cursor.execute(sql)\n # 执行sql语句\n db.commit()\nexcept:\n # 发生错误时回滚\n db.rollback()\n \n# 关闭数据库连接\ndb.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
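# For every cell of a 400x400 grid, track the smallest Manhattan (cityblock)
# distance to any point listed in input.txt; exact ties get a -1000 sentinel
# and the resulting distance field is rendered with matplotlib.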
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance
with open('input.txt', 'r') as f:
data = f.read()
res = [i for i in data.splitlines()]
print(res)
newHold = []
for line in res:
newHold.append((tuple(int(i) for i in line.split(', '))))
print(newHold)
mapper = np.zeros((400,400))
#plt.scatter(*zip(*newHold))
#plt.show()
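
# seed each point's own cell with its index; the scan below replaces these
# values with nearest-point Manhattan distances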
for i, tup in enumerate(newHold):
x = tup[0]
y = tup[1]
if mapper[y][x] == 0:
mapper[y][x] = i
rows = mapper.shape[0]
cols = mapper.shape[1]
for num, top in enumerate(newHold):
first = list(newHold[num])
for i in range(0, rows):
for j in range(0, cols):
            # keep the smallest cityblock (Manhattan) distance seen so far
            if ((mapper[i][j] > distance.cityblock(first, [i,j])) or (mapper[i][j] == 0)):
                mapper[i][j] = distance.cityblock(first, [i,j])
            # an exact tie between two points is marked with a sentinel
            elif mapper[i][j] == distance.cityblock(first, [i,j]):
                mapper[i][j] = -1000
print(num)
plt.imshow(mapper, cmap="viridis")
plt.show()
plt.imshow(mapper, cmap="viridis")
plt.show()
|
normal
|
{
"blob_id": "47476fbb78ca8ce14d30bf226795bbd85b5bae45",
"index": 6939,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('input.txt', 'r') as f:\n data = f.read()\n<mask token>\nprint(res)\n<mask token>\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\n<mask token>\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\n<mask token>\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n",
"step-3": "<mask token>\nwith open('input.txt', 'r') as f:\n data = f.read()\nres = [i for i in data.splitlines()]\nprint(res)\nnewHold = []\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\nmapper = np.zeros((400, 400))\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\nrows = mapper.shape[0]\ncols = mapper.shape[1]\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n",
"step-4": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nwith open('input.txt', 'r') as f:\n data = f.read()\nres = [i for i in data.splitlines()]\nprint(res)\nnewHold = []\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\nmapper = np.zeros((400, 400))\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\nrows = mapper.shape[0]\ncols = mapper.shape[1]\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n",
"step-5": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\n\nwith open('input.txt', 'r') as f:\n data = f.read()\n\nres = [i for i in data.splitlines()]\nprint(res)\n\nnewHold = []\nfor line in res:\n newHold.append((tuple(int(i) for i in line.split(', '))))\nprint(newHold)\nmapper = np.zeros((400,400))\n\n#plt.scatter(*zip(*newHold))\n#plt.show()\n\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\n\nrows = mapper.shape[0]\ncols = mapper.shape[1]\n\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if ((mapper[i][j] > distance.cityblock(first, [i,j])) or (mapper[i][j] == 0)):\n mapper[i][j] = distance.cityblock(first, [i,j])\n elif mapper[i][j] == distance.cityblock(first, [i,j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap=\"viridis\")\n plt.show()\n\nplt.imshow(mapper, cmap=\"viridis\")\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def findLCS(read, cassette, rIndex, cIndex, cassettes):
LCS = ''
while True:
if read[rIndex] == cassette[cIndex]:
LCS += read[rIndex]
rIndex = rIndex + 1
cIndex = cIndex + 1
else:
break
return LCS
<|reserved_special_token_0|>
def checkGap(LCS, cassettes, cIndex):
if findConsensus(cassettes, cIndex) == '-':
LCS = LCS + '-'
cIndex = cIndex + 1
return LCS, cIndex
else:
return LCS, cIndex
def deletenuc(read, cassettes, rIndex, cIndex):
if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:
return True
else:
return False
<|reserved_special_token_0|>
def main():
FASTA = input('Enter FASTA file:')
reference = input('Enter reference file:')
in_file = open(FASTA, 'r')
in_file1 = open(reference, 'r')
line_list = []
line_list1 = []
for line in in_file:
line = line.strip()
line_list.append(line)
readnames = line_list[::2]
reads = line_list[1::2]
for line1 in in_file1:
line1 = line1.strip()
line_list1.append(line1)
cassettes = line_list1[1::2]
refnames = line_list1[::2]
A = []
for i in reads:
alignedRead = align(i, cassettes)
A.append(alignedRead)
<|reserved_special_token_1|>
def findLCS(read, cassette, rIndex, cIndex, cassettes):
LCS = ''
while True:
if read[rIndex] == cassette[cIndex]:
LCS += read[rIndex]
rIndex = rIndex + 1
cIndex = cIndex + 1
else:
break
return LCS
<|reserved_special_token_0|>
def checkGap(LCS, cassettes, cIndex):
if findConsensus(cassettes, cIndex) == '-':
LCS = LCS + '-'
cIndex = cIndex + 1
return LCS, cIndex
else:
return LCS, cIndex
def deletenuc(read, cassettes, rIndex, cIndex):
if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:
return True
else:
return False
def insertnuc(LCS, read, cassettes, rIndex, cIndex):
if len(findMaxLCS(read, cassettes, rIndex, cIndex + 1)) >= 3:
return True
else:
return False
<|reserved_special_token_0|>
def main():
FASTA = input('Enter FASTA file:')
reference = input('Enter reference file:')
in_file = open(FASTA, 'r')
in_file1 = open(reference, 'r')
line_list = []
line_list1 = []
for line in in_file:
line = line.strip()
line_list.append(line)
readnames = line_list[::2]
reads = line_list[1::2]
for line1 in in_file1:
line1 = line1.strip()
line_list1.append(line1)
cassettes = line_list1[1::2]
refnames = line_list1[::2]
A = []
for i in reads:
alignedRead = align(i, cassettes)
A.append(alignedRead)
<|reserved_special_token_1|>
def findLCS(read, cassette, rIndex, cIndex, cassettes):
LCS = ''
while True:
if read[rIndex] == cassette[cIndex]:
LCS += read[rIndex]
rIndex = rIndex + 1
cIndex = cIndex + 1
else:
break
return LCS
def findMaxLCS(read, cassettes, rIndex, cIndex):
maxLCS = ''
for i in range(0, len(cassettes)):
LCS = findLCS(read, cassettes[i], rIndex, cIndex, cassettes)
if len(LCS) > len(maxLCS):
maxLCS = LCS
rIndex = rIndex + len(maxLCS)
cIndex = cIndex + len(maxLCS)
return maxLCS, rIndex, cIndex
<|reserved_special_token_0|>
def checkGap(LCS, cassettes, cIndex):
if findConsensus(cassettes, cIndex) == '-':
LCS = LCS + '-'
cIndex = cIndex + 1
return LCS, cIndex
else:
return LCS, cIndex
def deletenuc(read, cassettes, rIndex, cIndex):
if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:
return True
else:
return False
def insertnuc(LCS, read, cassettes, rIndex, cIndex):
if len(findMaxLCS(read, cassettes, rIndex, cIndex + 1)) >= 3:
return True
else:
return False
<|reserved_special_token_0|>
def main():
FASTA = input('Enter FASTA file:')
reference = input('Enter reference file:')
in_file = open(FASTA, 'r')
in_file1 = open(reference, 'r')
line_list = []
line_list1 = []
for line in in_file:
line = line.strip()
line_list.append(line)
readnames = line_list[::2]
reads = line_list[1::2]
for line1 in in_file1:
line1 = line1.strip()
line_list1.append(line1)
cassettes = line_list1[1::2]
refnames = line_list1[::2]
A = []
for i in reads:
alignedRead = align(i, cassettes)
A.append(alignedRead)
<|reserved_special_token_1|>
def findLCS(read, cassette, rIndex, cIndex, cassettes):
LCS = ''
while True:
if read[rIndex] == cassette[cIndex]:
LCS += read[rIndex]
rIndex = rIndex + 1
cIndex = cIndex + 1
else:
break
return LCS
def findMaxLCS(read, cassettes, rIndex, cIndex):
maxLCS = ''
for i in range(0, len(cassettes)):
LCS = findLCS(read, cassettes[i], rIndex, cIndex, cassettes)
if len(LCS) > len(maxLCS):
maxLCS = LCS
rIndex = rIndex + len(maxLCS)
cIndex = cIndex + len(maxLCS)
return maxLCS, rIndex, cIndex
<|reserved_special_token_0|>
def checkGap(LCS, cassettes, cIndex):
if findConsensus(cassettes, cIndex) == '-':
LCS = LCS + '-'
cIndex = cIndex + 1
return LCS, cIndex
else:
return LCS, cIndex
def deletenuc(read, cassettes, rIndex, cIndex):
if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:
return True
else:
return False
def insertnuc(LCS, read, cassettes, rIndex, cIndex):
if len(findMaxLCS(read, cassettes, rIndex, cIndex + 1)) >= 3:
return True
else:
return False
def align(read, cassettes):
rIndex = 0
cIndex = 0
alignedRead = ''
LCS = ''
delrec = []
insertrec = []
substrec = []
while rIndex <= len(read):
LCS, rIndex, cIndex = findMaxLCS(read, cassettes, rIndex, cIndex)
LCS, cIndex = checkGap(LCS, cassettes, cIndex)
if len(LCS) <= 6:
if insertnuc(LCS, read, cassettes, rIndex, cIndex) == True:
insertrec.append(rIndex)
nuc = findConsensus(cassettes, cIndex)
cIndex = cIndex + 1
LCS = LCS + nuc
else:
LCS, cIndex = checkGap(LCS, cassettes, cIndex)
alignedRead = alignedRead + str(LCS)
print(alignedRead)
return alignedRead
def main():
FASTA = input('Enter FASTA file:')
reference = input('Enter reference file:')
in_file = open(FASTA, 'r')
in_file1 = open(reference, 'r')
line_list = []
line_list1 = []
for line in in_file:
line = line.strip()
line_list.append(line)
readnames = line_list[::2]
reads = line_list[1::2]
for line1 in in_file1:
line1 = line1.strip()
line_list1.append(line1)
cassettes = line_list1[1::2]
refnames = line_list1[::2]
A = []
for i in reads:
alignedRead = align(i, cassettes)
A.append(alignedRead)
<|reserved_special_token_1|>
#!/usr/bin/python
#import Bio
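# Greedy read aligner: stretches of the read are matched against a set of
# reference cassettes by repeatedly taking the longest common run, using a
# per-column consensus to bridge gaps and likely insertions.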
def findLCS(read, cassette, rIndex, cIndex,cassettes):
LCS=''
while True:
if read[rIndex] == cassette[cIndex]:
LCS+= read[rIndex]
rIndex= rIndex +1
cIndex= cIndex +1
#elif checkLCS(cIndex,cassettes)==True:
else:
break
#print(LCS)
return LCS
def findMaxLCS(read, cassettes, rIndex, cIndex):
#print(read)
maxLCS=''
#print(len(cassettes))
for i in range (0,len(cassettes)):
LCS=findLCS(read, cassettes[i],rIndex, cIndex,cassettes)
if len(LCS) > len(maxLCS):
maxLCS=LCS
rIndex= rIndex+len(maxLCS)
cIndex= cIndex+len(maxLCS)
return maxLCS ,rIndex ,cIndex
def findConsensus(cassettes, cIndex):
#print (cassettes)
con=[]
for i in range(0,len(cassettes[1])-26):
holder=[]
for j in range(0,len(cassettes)):
holder.append(cassettes[j][i])
con.append(holder)
con2=[]
for k in range (0,len(con)):
if con[k].count('G')==16 or (con[k].count('G')==14) :
con2.append('g')
elif con[k].count('A')==16 or (con[k].count('A')==14): #con[k][1]=='-'
con2.append('a')
elif con[k].count('C')==16 or (con[k].count('C')==14):
con2.append('c')
elif con[k].count('T')==16 or (con[k].count('T')==14):
con2.append('t')
elif con[k].count('-')>=10:
con2.append('-')
else:
con2.append('n')
#print(con)
#print(con2)
return con2[cIndex]
def checkGap(LCS, cassettes, cIndex):
#print(rIndex)
#print(cIndex)
#nuc= findConsensus(cassettes, cIndex)
#LCS=LCS+ str(nuc)
#cIndex=cIndex+1
if findConsensus(cassettes, cIndex)== '-':
LCS=LCS+'-'
cIndex=cIndex+1
return LCS, cIndex
else:
return LCS, cIndex
#print(rIndex)
#elif findConsens
#elif (findConsensus(cassettes, cIndex)).isalpha():
def deletenuc(read, cassettes, rIndex, cIndex):
    if len(findMaxLCS(read, cassettes, rIndex+1, cIndex)[0])>=3:  # findMaxLCS returns (LCS, rIndex, cIndex); test the LCS length
return True
else:
return False
def insertnuc(LCS, read, cassettes, rIndex, cIndex):
    if len(findMaxLCS(read, cassettes, rIndex, cIndex+1)[0])>=3:  # compare the LCS length, not the returned tuple
return True
else:
return False
#def subsnuc(
#def checkgaps(
def align(read, cassettes):
#print(read)
#print('hi')
#print(cassettes)
rIndex=0
cIndex=0
alignedRead=''
LCS=''
delrec=[]
insertrec=[]
substrec=[]
#print(read)
while rIndex<= len(read):
#print(read)
#print(len(read))
#print(rIndex)
LCS, rIndex, cIndex= findMaxLCS(read, cassettes,rIndex, cIndex)
#print(rIndex)
#print(cIndex)
#print(LCS)
LCS, cIndex= checkGap(LCS, cassettes,cIndex)
#print(rIndex,cIndex)
#print(LCS)
#if deletenuc(read, cassettes, rIndex,cIndex)==True:
#delrec.append(rIndex)
#rIndex= rIndex+1
if len(LCS)<=6 :
#print (LCS, rIndex)
#print('enter')
if insertnuc(LCS, read, cassettes, rIndex, cIndex)==True:
#print(True, LCS)
insertrec.append(rIndex)
nuc= findConsensus(cassettes, cIndex)
cIndex=cIndex+1
LCS= LCS+nuc
else:
LCS, cIndex= checkGap(LCS, cassettes,cIndex)
#elif subsnuc(LCS, read, cassettes, rIndex, cIndex)==True:
#else:
# LCS, cIndex= checkLCS(LCS, cassettes,cIndex)
# nuc= findConsensus(cassettes, cIndex)
# LCS= LCS+nuc
# cIndex=cIndex+1
# rIndex=rIndex+1
alignedRead= alignedRead+ str(LCS)
print(alignedRead)
return alignedRead
def main():
FASTA=input('Enter FASTA file:')
reference=input('Enter reference file:')
in_file=open(FASTA, 'r')
in_file1=open(reference,'r')
line_list=[]
line_list1=[]
for line in in_file:
line=line.strip()
line_list.append(line)
readnames=line_list[::2] #list of the read headers
reads=line_list[1::2] #list of sequences only
for line1 in in_file1:
line1=line1.strip()
line_list1.append(line1)
cassettes=line_list1[1::2]
refnames=line_list1[::2]
#for i in cassettes:
# print(len(i))
#print(cassettes)
#print(reads)
A=[]
for i in reads:
#print(i[0])
alignedRead=align(i,cassettes)
A.append(alignedRead)
#print(align(i,cassettes))
#out = open("out.txt", "w")
#out.write(align(i, cassettes)
#out.close()
#print(A)
#con=findConsensus(0,cassettes)
#print(con)
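
# assumed entry point: run the scan when executed directly
if __name__ == '__main__':
    main()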
|
flexible
|
{
"blob_id": "5cec9e82aa994d07e25d8356a8218fc461bb8b4e",
"index": 4728,
"step-1": "def findLCS(read, cassette, rIndex, cIndex, cassettes):\n LCS = ''\n while True:\n if read[rIndex] == cassette[cIndex]:\n LCS += read[rIndex]\n rIndex = rIndex + 1\n cIndex = cIndex + 1\n else:\n break\n return LCS\n\n\n<mask token>\n\n\ndef checkGap(LCS, cassettes, cIndex):\n if findConsensus(cassettes, cIndex) == '-':\n LCS = LCS + '-'\n cIndex = cIndex + 1\n return LCS, cIndex\n else:\n return LCS, cIndex\n\n\ndef deletenuc(read, cassettes, rIndex, cIndex):\n if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef main():\n FASTA = input('Enter FASTA file:')\n reference = input('Enter reference file:')\n in_file = open(FASTA, 'r')\n in_file1 = open(reference, 'r')\n line_list = []\n line_list1 = []\n for line in in_file:\n line = line.strip()\n line_list.append(line)\n readnames = line_list[::2]\n reads = line_list[1::2]\n for line1 in in_file1:\n line1 = line1.strip()\n line_list1.append(line1)\n cassettes = line_list1[1::2]\n refnames = line_list1[::2]\n A = []\n for i in reads:\n alignedRead = align(i, cassettes)\n A.append(alignedRead)\n",
"step-2": "def findLCS(read, cassette, rIndex, cIndex, cassettes):\n LCS = ''\n while True:\n if read[rIndex] == cassette[cIndex]:\n LCS += read[rIndex]\n rIndex = rIndex + 1\n cIndex = cIndex + 1\n else:\n break\n return LCS\n\n\n<mask token>\n\n\ndef checkGap(LCS, cassettes, cIndex):\n if findConsensus(cassettes, cIndex) == '-':\n LCS = LCS + '-'\n cIndex = cIndex + 1\n return LCS, cIndex\n else:\n return LCS, cIndex\n\n\ndef deletenuc(read, cassettes, rIndex, cIndex):\n if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:\n return True\n else:\n return False\n\n\ndef insertnuc(LCS, read, cassettes, rIndex, cIndex):\n if len(findMaxLCS(read, cassettes, rIndex, cIndex + 1)) >= 3:\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef main():\n FASTA = input('Enter FASTA file:')\n reference = input('Enter reference file:')\n in_file = open(FASTA, 'r')\n in_file1 = open(reference, 'r')\n line_list = []\n line_list1 = []\n for line in in_file:\n line = line.strip()\n line_list.append(line)\n readnames = line_list[::2]\n reads = line_list[1::2]\n for line1 in in_file1:\n line1 = line1.strip()\n line_list1.append(line1)\n cassettes = line_list1[1::2]\n refnames = line_list1[::2]\n A = []\n for i in reads:\n alignedRead = align(i, cassettes)\n A.append(alignedRead)\n",
"step-3": "def findLCS(read, cassette, rIndex, cIndex, cassettes):\n LCS = ''\n while True:\n if read[rIndex] == cassette[cIndex]:\n LCS += read[rIndex]\n rIndex = rIndex + 1\n cIndex = cIndex + 1\n else:\n break\n return LCS\n\n\ndef findMaxLCS(read, cassettes, rIndex, cIndex):\n maxLCS = ''\n for i in range(0, len(cassettes)):\n LCS = findLCS(read, cassettes[i], rIndex, cIndex, cassettes)\n if len(LCS) > len(maxLCS):\n maxLCS = LCS\n rIndex = rIndex + len(maxLCS)\n cIndex = cIndex + len(maxLCS)\n return maxLCS, rIndex, cIndex\n\n\n<mask token>\n\n\ndef checkGap(LCS, cassettes, cIndex):\n if findConsensus(cassettes, cIndex) == '-':\n LCS = LCS + '-'\n cIndex = cIndex + 1\n return LCS, cIndex\n else:\n return LCS, cIndex\n\n\ndef deletenuc(read, cassettes, rIndex, cIndex):\n if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:\n return True\n else:\n return False\n\n\ndef insertnuc(LCS, read, cassettes, rIndex, cIndex):\n if len(findMaxLCS(read, cassettes, rIndex, cIndex + 1)) >= 3:\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef main():\n FASTA = input('Enter FASTA file:')\n reference = input('Enter reference file:')\n in_file = open(FASTA, 'r')\n in_file1 = open(reference, 'r')\n line_list = []\n line_list1 = []\n for line in in_file:\n line = line.strip()\n line_list.append(line)\n readnames = line_list[::2]\n reads = line_list[1::2]\n for line1 in in_file1:\n line1 = line1.strip()\n line_list1.append(line1)\n cassettes = line_list1[1::2]\n refnames = line_list1[::2]\n A = []\n for i in reads:\n alignedRead = align(i, cassettes)\n A.append(alignedRead)\n",
"step-4": "def findLCS(read, cassette, rIndex, cIndex, cassettes):\n LCS = ''\n while True:\n if read[rIndex] == cassette[cIndex]:\n LCS += read[rIndex]\n rIndex = rIndex + 1\n cIndex = cIndex + 1\n else:\n break\n return LCS\n\n\ndef findMaxLCS(read, cassettes, rIndex, cIndex):\n maxLCS = ''\n for i in range(0, len(cassettes)):\n LCS = findLCS(read, cassettes[i], rIndex, cIndex, cassettes)\n if len(LCS) > len(maxLCS):\n maxLCS = LCS\n rIndex = rIndex + len(maxLCS)\n cIndex = cIndex + len(maxLCS)\n return maxLCS, rIndex, cIndex\n\n\n<mask token>\n\n\ndef checkGap(LCS, cassettes, cIndex):\n if findConsensus(cassettes, cIndex) == '-':\n LCS = LCS + '-'\n cIndex = cIndex + 1\n return LCS, cIndex\n else:\n return LCS, cIndex\n\n\ndef deletenuc(read, cassettes, rIndex, cIndex):\n if len(findMaxLCS(read, cassettes, rIndex + 1, cIndex)) >= 3:\n return True\n else:\n return False\n\n\ndef insertnuc(LCS, read, cassettes, rIndex, cIndex):\n if len(findMaxLCS(read, cassettes, rIndex, cIndex + 1)) >= 3:\n return True\n else:\n return False\n\n\ndef align(read, cassettes):\n rIndex = 0\n cIndex = 0\n alignedRead = ''\n LCS = ''\n delrec = []\n insertrec = []\n substrec = []\n while rIndex <= len(read):\n LCS, rIndex, cIndex = findMaxLCS(read, cassettes, rIndex, cIndex)\n LCS, cIndex = checkGap(LCS, cassettes, cIndex)\n if len(LCS) <= 6:\n if insertnuc(LCS, read, cassettes, rIndex, cIndex) == True:\n insertrec.append(rIndex)\n nuc = findConsensus(cassettes, cIndex)\n cIndex = cIndex + 1\n LCS = LCS + nuc\n else:\n LCS, cIndex = checkGap(LCS, cassettes, cIndex)\n alignedRead = alignedRead + str(LCS)\n print(alignedRead)\n return alignedRead\n\n\ndef main():\n FASTA = input('Enter FASTA file:')\n reference = input('Enter reference file:')\n in_file = open(FASTA, 'r')\n in_file1 = open(reference, 'r')\n line_list = []\n line_list1 = []\n for line in in_file:\n line = line.strip()\n line_list.append(line)\n readnames = line_list[::2]\n reads = line_list[1::2]\n for line1 in in_file1:\n line1 = line1.strip()\n line_list1.append(line1)\n cassettes = line_list1[1::2]\n refnames = line_list1[::2]\n A = []\n for i in reads:\n alignedRead = align(i, cassettes)\n A.append(alignedRead)\n",
"step-5": "#!/usr/bin/python\n#import Bio\n\n \n\ndef findLCS(read, cassette, rIndex, cIndex,cassettes):\n \n LCS=''\n while True:\n if read[rIndex] == cassette[cIndex]:\n LCS+= read[rIndex]\n rIndex= rIndex +1\n cIndex= cIndex +1\n #elif checkLCS(cIndex,cassettes)==True:\n else:\n break\n\n #print(LCS)\n \n return LCS\n\ndef findMaxLCS(read, cassettes, rIndex, cIndex):\n #print(read)\n maxLCS=''\n #print(len(cassettes))\n for i in range (0,len(cassettes)):\n LCS=findLCS(read, cassettes[i],rIndex, cIndex,cassettes)\n \n if len(LCS) > len(maxLCS):\n \n maxLCS=LCS\n \n \n \n rIndex= rIndex+len(maxLCS)\n cIndex= cIndex+len(maxLCS)\n return maxLCS ,rIndex ,cIndex\n\ndef findConsensus(cassettes, cIndex):\n #print (cassettes)\n con=[]\n for i in range(0,len(cassettes[1])-26):\n holder=[]\n for j in range(0,len(cassettes)):\n holder.append(cassettes[j][i])\n con.append(holder)\n con2=[]\n for k in range (0,len(con)):\n if con[k].count('G')==16 or (con[k].count('G')==14) :\n con2.append('g')\n elif con[k].count('A')==16 or (con[k].count('A')==14): #con[k][1]=='-'\n con2.append('a')\n elif con[k].count('C')==16 or (con[k].count('C')==14):\n con2.append('c')\n elif con[k].count('T')==16 or (con[k].count('T')==14):\n con2.append('t')\n elif con[k].count('-')>=10:\n con2.append('-')\n else:\n con2.append('n')\n #print(con) \n #print(con2)\n\n return con2[cIndex]\n\ndef checkGap(LCS, cassettes, cIndex):\n \n #print(rIndex)\n #print(cIndex)\n\n #nuc= findConsensus(cassettes, cIndex)\n #LCS=LCS+ str(nuc)\n #cIndex=cIndex+1\n \n if findConsensus(cassettes, cIndex)== '-':\n LCS=LCS+'-'\n cIndex=cIndex+1\n return LCS, cIndex\n else:\n return LCS, cIndex\n #print(rIndex)\n #elif findConsens\n \n \n #elif (findConsensus(cassettes, cIndex)).isalpha():\n \n \n \n\ndef deletenuc(read, cassettes, rIndex, cIndex):\n\n if len(findMaxLCS(read, cassettes, rIndex+1, cIndex))>=3:\n \n return True\n else:\n return False\n \ndef insertnuc(LCS, read, cassettes, rIndex, cIndex):\n\n if len(findMaxLCS(read, cassettes, rIndex, cIndex+1))>=3:\n return True\n else:\n return False\n\n#def subsnuc(\n \n\n#def checkgaps(\n\n\ndef align(read, cassettes):\n #print(read)\n #print('hi')\n #print(cassettes)\n rIndex=0\n cIndex=0\n alignedRead=''\n LCS=''\n delrec=[]\n insertrec=[]\n substrec=[]\n \n #print(read)\n while rIndex<= len(read):\n #print(read)\n \n #print(len(read))\n #print(rIndex)\n LCS, rIndex, cIndex= findMaxLCS(read, cassettes,rIndex, cIndex)\n #print(rIndex)\n #print(cIndex)\n #print(LCS)\n LCS, cIndex= checkGap(LCS, cassettes,cIndex)\n \n #print(rIndex,cIndex)\n #print(LCS) \n \n #if deletenuc(read, cassettes, rIndex,cIndex)==True:\n #delrec.append(rIndex)\n #rIndex= rIndex+1\n if len(LCS)<=6 :\n #print (LCS, rIndex)\n #print('enter')\n if insertnuc(LCS, read, cassettes, rIndex, cIndex)==True:\n #print(True, LCS)\n insertrec.append(rIndex)\n nuc= findConsensus(cassettes, cIndex)\n cIndex=cIndex+1\n LCS= LCS+nuc\n else:\n LCS, cIndex= checkGap(LCS, cassettes,cIndex)\n \n #elif subsnuc(LCS, read, cassettes, rIndex, cIndex)==True:\n \n\n \n #else:\n # LCS, cIndex= checkLCS(LCS, cassettes,cIndex)\n\n \n \n\n \n # nuc= findConsensus(cassettes, cIndex)\n # LCS= LCS+nuc\n # cIndex=cIndex+1\n # rIndex=rIndex+1\n \n alignedRead= alignedRead+ str(LCS)\n print(alignedRead)\n \n return alignedRead\n\ndef main():\n FASTA=input('Enter FASTA file:')\n reference=input('Enter reference file:')\n in_file=open(FASTA, 'r')\n in_file1=open(reference,'r')\n\n\n line_list=[] \n line_list1=[]\n\n\n\n for line in in_file:\n 
line=line.strip()\n line_list.append(line)\n readnames=line_list[::2] #list of the read headers\n reads=line_list[1::2] #list of sequences only\n\n for line1 in in_file1:\n line1=line1.strip()\n line_list1.append(line1) \n cassettes=line_list1[1::2]\n refnames=line_list1[::2]\n\n #for i in cassettes:\n # print(len(i))\n #print(cassettes)\n #print(reads)\n A=[]\n for i in reads:\n #print(i[0])\n alignedRead=align(i,cassettes)\n A.append(alignedRead)\n #print(align(i,cassettes))\n #out = open(\"out.txt\", \"w\")\n #out.write(align(i, cassettes)\n #out.close()\n \n #print(A)\n #con=findConsensus(0,cassettes)\n #print(con)\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# Generated by Django 2.1.2 on 2018-10-26 12:40
from django.db import migrations
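
# renames the PlayerCards model to PlayerCard and drops the cards field
# from Profile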
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20181010_0852'),
('accounts', '0004_playercards'),
]
operations = [
migrations.RenameModel(
old_name='PlayerCards',
new_name='PlayerCard',
),
migrations.RemoveField(
model_name='profile',
name='cards',
),
]
|
normal
|
{
"blob_id": "59596c69df6a2c453fd147a9c8a2c7d47ed79fb3",
"index": 3222,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0007_auto_20181010_0852'), ('accounts',\n '0004_playercards')]\n operations = [migrations.RenameModel(old_name='PlayerCards', new_name=\n 'PlayerCard'), migrations.RemoveField(model_name='profile', name=\n 'cards')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0007_auto_20181010_0852'), ('accounts',\n '0004_playercards')]\n operations = [migrations.RenameModel(old_name='PlayerCards', new_name=\n 'PlayerCard'), migrations.RemoveField(model_name='profile', name=\n 'cards')]\n",
"step-5": "# Generated by Django 2.1.2 on 2018-10-26 12:40\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0007_auto_20181010_0852'),\n ('accounts', '0004_playercards'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='PlayerCards',\n new_name='PlayerCard',\n ),\n migrations.RemoveField(\n model_name='profile',\n name='cards',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# coding=UTF-8
# for models.py
from django.db import models
from django.db.models import F, Q, Sum, Avg
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.sites.models import Site
# from apps.router.models import User
# from django.contrib.auth.models import Message
# from django.contrib import messages  # TODO: wangqi 20150521: Message seems unused now; if needed, replace it with django.contrib.messages
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import render_to_string
from datetime import datetime, timedelta, date
# from apps.common.utils.utils_collection import *
# from apps.common.utils.utils_datetime import *
# from apps.common.utils.utils_mysql import *
# from apps.common.utils.utils_number import *
# from apps.common.utils.utils_render import *
# from apps.common.biz_utils.utils_sorter import *
# from apps.common.utils.utils_string import *
# from apps.common.biz_utils.utils_misc import *
# from apilib import *
# from apilib import tsapi
|
normal
|
{
"blob_id": "d551cab1856fbdb91918f9171d5c02b8dab84aba",
"index": 8223,
"step-1": "<mask token>\n",
"step-2": "from django.db import models\nfrom django.db.models import F, Q, Sum, Avg\nfrom django.db import transaction\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.sites.models import Site\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.loader import render_to_string\nfrom datetime import datetime, timedelta, date\n",
"step-3": "# coding=UTF-8\n#!/usr/bin/env python\n\n# for models.py\nfrom django.db import models\nfrom django.db.models import F, Q, Sum, Avg\nfrom django.db import transaction\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.sites.models import Site\n# from apps.router.models import User\n# from django.contrib.auth.models import Message\n# from django.contrib import messages TODO: wangqi 20150521 Message�ƺ�û�õ��ˣ����Ҫ�������������滻\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.loader import render_to_string\nfrom datetime import datetime, timedelta, date\n\n# from apps.common.utils.utils_collection import *\n# from apps.common.utils.utils_datetime import *\n# from apps.common.utils.utils_mysql import *\n# from apps.common.utils.utils_number import *\n# from apps.common.utils.utils_render import *\n# from apps.common.biz_utils.utils_sorter import *\n# from apps.common.utils.utils_string import *\n# from apps.common.biz_utils.utils_misc import *\n# from apilib import *\n# from apilib import tsapi\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python2
## -*- coding: utf-8 -*-
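# Machine-generated symbolic trace: each ref_N assignment mirrors one lifted
# instruction applied to the 64-bit input SymVar_0.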
import sys
def sx(bits, value):
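    # sign-extend: interpret `value` as a two's-complement integer `bits` wide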
sign_bit = 1 << (bits - 1)
return (value & (sign_bit - 1)) - (value & sign_bit)
SymVar_0 = int(sys.argv[1])
ref_263 = SymVar_0
ref_278 = ref_263 # MOV operation
ref_5710 = ref_278 # MOV operation
ref_5786 = ref_5710 # MOV operation
ref_5800 = (0x1F02C962 | ref_5786) # OR operation
ref_5901 = ref_5800 # MOV operation
ref_5915 = (0x1F8797B2 & ref_5901) # AND operation
ref_6846 = ref_5915 # MOV operation
ref_7764 = ref_6846 # MOV operation
ref_8577 = ref_278 # MOV operation
ref_8653 = ref_8577 # MOV operation
ref_8665 = ref_7764 # MOV operation
ref_8667 = (ref_8665 & ref_8653) # AND operation
ref_9598 = ref_8667 # MOV operation
ref_10431 = ref_278 # MOV operation
ref_10631 = ref_10431 # MOV operation
ref_10637 = (((sx(0x40, 0x66AF1DF) * sx(0x40, ref_10631)) & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) & 0xFFFFFFFFFFFFFFFF) # IMUL operation
ref_11673 = ref_9598 # MOV operation
ref_11749 = ref_11673 # MOV operation
ref_11763 = ((ref_11749 << (0x39 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_12802 = ref_9598 # MOV operation
ref_12878 = ref_12802 # MOV operation
ref_12892 = (ref_12878 >> (0x7 & 0x3F)) # SHR operation
ref_12993 = ref_12892 # MOV operation
ref_13005 = ref_11763 # MOV operation
ref_13007 = (ref_13005 | ref_12993) # OR operation
ref_13116 = ref_10637 # MOV operation
ref_13120 = ref_13007 # MOV operation
ref_13122 = ((ref_13120 + ref_13116) & 0xFFFFFFFFFFFFFFFF) # ADD operation
ref_14054 = ref_13122 # MOV operation
ref_22590 = ref_14054 # MOV operation
ref_23808 = ref_14054 # MOV operation
ref_23892 = ref_22590 # MOV operation
ref_23896 = ref_23808 # MOV operation
ref_23898 = ((ref_23896 + ref_23892) & 0xFFFFFFFFFFFFFFFF) # ADD operation
ref_24830 = ref_23898 # MOV operation
ref_26068 = ref_9598 # MOV operation
ref_26144 = ref_26068 # MOV operation
ref_26158 = (0x7 & ref_26144) # AND operation
ref_26259 = ref_26158 # MOV operation
ref_26273 = ((ref_26259 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_27516 = ref_14054 # MOV operation
ref_27592 = ref_27516 # MOV operation
ref_27604 = ref_26273 # MOV operation
ref_27606 = (ref_27604 | ref_27592) # OR operation
ref_28857 = ref_27606 # MOV operation
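# split ref_28857 into its eight bytes, most significant first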
ref_28859 = ((ref_28857 >> 56) & 0xFF) # Byte reference - MOV operation
ref_28860 = ((ref_28857 >> 48) & 0xFF) # Byte reference - MOV operation
ref_28861 = ((ref_28857 >> 40) & 0xFF) # Byte reference - MOV operation
ref_28862 = ((ref_28857 >> 32) & 0xFF) # Byte reference - MOV operation
ref_28863 = ((ref_28857 >> 24) & 0xFF) # Byte reference - MOV operation
ref_28864 = ((ref_28857 >> 16) & 0xFF) # Byte reference - MOV operation
ref_28865 = ((ref_28857 >> 8) & 0xFF) # Byte reference - MOV operation
ref_28866 = (ref_28857 & 0xFF) # Byte reference - MOV operation
ref_30829 = ref_28859 # MOVZX operation
ref_30905 = (ref_30829 & 0xFF) # MOVZX operation
ref_34489 = ref_28866 # MOVZX operation
ref_34565 = (ref_34489 & 0xFF) # MOVZX operation
ref_34567 = (ref_34565 & 0xFF) # Byte reference - MOV operation
ref_36529 = (ref_30905 & 0xFF) # MOVZX operation
ref_36605 = (ref_36529 & 0xFF) # MOVZX operation
ref_36607 = (ref_36605 & 0xFF) # Byte reference - MOV operation
ref_37835 = ref_9598 # MOV operation
ref_39053 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation
ref_39129 = ref_39053 # MOV operation
ref_39141 = ref_37835 # MOV operation
ref_39143 = (ref_39141 & ref_39129) # AND operation
ref_39244 = ref_39143 # MOV operation
ref_39258 = (0x1F & ref_39244) # AND operation
ref_39359 = ref_39258 # MOV operation
ref_39373 = ((ref_39359 << (0x4 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_40296 = ref_6846 # MOV operation
ref_40372 = ref_40296 # MOV operation
ref_40384 = ref_39373 # MOV operation
ref_40386 = (ref_40384 | ref_40372) # OR operation
ref_41317 = ref_40386 # MOV operation
ref_43860 = ref_24830 # MOV operation
ref_45078 = ref_24830 # MOV operation
ref_45162 = ref_43860 # MOV operation
ref_45166 = ref_45078 # MOV operation
ref_45168 = ((ref_45166 + ref_45162) & 0xFFFFFFFFFFFFFFFF) # ADD operation
ref_46100 = ref_45168 # MOV operation
ref_47338 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation
ref_47414 = ref_47338 # MOV operation
ref_47428 = (0x7 & ref_47414) # AND operation
ref_47529 = ref_47428 # MOV operation
ref_47543 = ((ref_47529 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_48786 = ref_46100 # MOV operation
ref_48862 = ref_48786 # MOV operation
ref_48874 = ref_47543 # MOV operation
ref_48876 = (ref_48874 | ref_48862) # OR operation
ref_50127 = ref_48876 # MOV operation
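# split ref_50127 into its eight bytes, most significant first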
ref_50129 = ((ref_50127 >> 56) & 0xFF) # Byte reference - MOV operation
ref_50130 = ((ref_50127 >> 48) & 0xFF) # Byte reference - MOV operation
ref_50131 = ((ref_50127 >> 40) & 0xFF) # Byte reference - MOV operation
ref_50132 = ((ref_50127 >> 32) & 0xFF) # Byte reference - MOV operation
ref_50133 = ((ref_50127 >> 24) & 0xFF) # Byte reference - MOV operation
ref_50134 = ((ref_50127 >> 16) & 0xFF) # Byte reference - MOV operation
ref_50135 = ((ref_50127 >> 8) & 0xFF) # Byte reference - MOV operation
ref_50136 = (ref_50127 & 0xFF) # Byte reference - MOV operation
ref_52099 = ref_50129 # MOVZX operation
ref_52175 = (ref_52099 & 0xFF) # MOVZX operation
ref_55759 = ref_50136 # MOVZX operation
ref_55835 = (ref_55759 & 0xFF) # MOVZX operation
ref_55837 = (ref_55835 & 0xFF) # Byte reference - MOV operation
ref_57799 = (ref_52175 & 0xFF) # MOVZX operation
ref_57875 = (ref_57799 & 0xFF) # MOVZX operation
ref_57877 = (ref_57875 & 0xFF) # Byte reference - MOV operation
ref_59105 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation
ref_60323 = ((((((((ref_55837) << 8 | ref_50130) << 8 | ref_50131) << 8 | ref_50132) << 8 | ref_50133) << 8 | ref_50134) << 8 | ref_50135) << 8 | ref_57877) # MOV operation
ref_60399 = ref_60323 # MOV operation
ref_60411 = ref_59105 # MOV operation
ref_60413 = (ref_60411 & ref_60399) # AND operation
ref_60514 = ref_60413 # MOV operation
ref_60528 = (0x1F & ref_60514) # AND operation
ref_60629 = ref_60528 # MOV operation
ref_60643 = ((ref_60629 << (0x4 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_61566 = ref_41317 # MOV operation
ref_61642 = ref_61566 # MOV operation
ref_61654 = ref_60643 # MOV operation
ref_61656 = (ref_61654 | ref_61642) # OR operation
ref_62587 = ref_61656 # MOV operation
ref_65203 = ((((((((ref_55837) << 8 | ref_50130) << 8 | ref_50131) << 8 | ref_50132) << 8 | ref_50133) << 8 | ref_50134) << 8 | ref_50135) << 8 | ref_57877) # MOV operation
ref_66101 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation
ref_66177 = ref_66101 # MOV operation
ref_66189 = ref_65203 # MOV operation
ref_66191 = (ref_66189 | ref_66177) # OR operation
ref_66292 = ref_66191 # MOV operation
ref_66306 = (0xF & ref_66292) # AND operation
ref_66407 = ref_66306 # MOV operation
ref_66421 = (0x1 | ref_66407) # OR operation
ref_66650 = ref_66421 # MOV operation
ref_66652 = ((0x40 - ref_66650) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_66660 = ref_66652 # MOV operation
ref_67926 = ref_9598 # MOV operation
ref_68002 = ref_67926 # MOV operation
ref_68016 = (ref_68002 >> (0x1 & 0x3F)) # SHR operation
ref_68117 = ref_68016 # MOV operation
ref_68131 = (0xF & ref_68117) # AND operation
ref_68232 = ref_68131 # MOV operation
ref_68246 = (0x1 | ref_68232) # OR operation
ref_68475 = ref_68246 # MOV operation
ref_68477 = ((0x40 - ref_68475) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_68485 = ref_68477 # MOV operation
ref_69403 = ref_62587 # MOV operation
ref_69479 = ref_69403 # MOV operation
ref_69491 = ref_68485 # MOV operation
ref_69493 = ((ref_69479 << ((ref_69491 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_70764 = ref_9598 # MOV operation
ref_70840 = ref_70764 # MOV operation
ref_70854 = (ref_70840 >> (0x1 & 0x3F)) # SHR operation
ref_70955 = ref_70854 # MOV operation
ref_70969 = (0xF & ref_70955) # AND operation
ref_71070 = ref_70969 # MOV operation
ref_71084 = (0x1 | ref_71070) # OR operation
ref_72007 = ref_62587 # MOV operation
ref_72083 = ref_72007 # MOV operation
ref_72095 = ref_71084 # MOV operation
ref_72097 = (ref_72083 >> ((ref_72095 & 0xFF) & 0x3F)) # SHR operation
ref_72198 = ref_72097 # MOV operation
ref_72210 = ref_69493 # MOV operation
ref_72212 = (ref_72210 | ref_72198) # OR operation
ref_72313 = ref_72212 # MOV operation
ref_72325 = ref_66660 # MOV operation
ref_72327 = (ref_72313 >> ((ref_72325 & 0xFF) & 0x3F)) # SHR operation
ref_73482 = ((((((((ref_55837) << 8 | ref_50130) << 8 | ref_50131) << 8 | ref_50132) << 8 | ref_50133) << 8 | ref_50134) << 8 | ref_50135) << 8 | ref_57877) # MOV operation
ref_74380 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation
ref_74456 = ref_74380 # MOV operation
ref_74468 = ref_73482 # MOV operation
ref_74470 = (ref_74468 | ref_74456) # OR operation
ref_74571 = ref_74470 # MOV operation
ref_74585 = (0xF & ref_74571) # AND operation
ref_74686 = ref_74585 # MOV operation
ref_74700 = (0x1 | ref_74686) # OR operation
ref_75971 = ref_9598 # MOV operation
ref_76047 = ref_75971 # MOV operation
ref_76061 = (ref_76047 >> (0x1 & 0x3F)) # SHR operation
ref_76162 = ref_76061 # MOV operation
ref_76176 = (0xF & ref_76162) # AND operation
ref_76277 = ref_76176 # MOV operation
ref_76291 = (0x1 | ref_76277) # OR operation
ref_76520 = ref_76291 # MOV operation
ref_76522 = ((0x40 - ref_76520) & 0xFFFFFFFFFFFFFFFF) # SUB operation
ref_76530 = ref_76522 # MOV operation
ref_77448 = ref_62587 # MOV operation
ref_77524 = ref_77448 # MOV operation
ref_77536 = ref_76530 # MOV operation
ref_77538 = ((ref_77524 << ((ref_77536 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_78809 = ref_9598 # MOV operation
ref_78885 = ref_78809 # MOV operation
ref_78899 = (ref_78885 >> (0x1 & 0x3F)) # SHR operation
ref_79000 = ref_78899 # MOV operation
ref_79014 = (0xF & ref_79000) # AND operation
ref_79115 = ref_79014 # MOV operation
ref_79129 = (0x1 | ref_79115) # OR operation
ref_80052 = ref_62587 # MOV operation
ref_80128 = ref_80052 # MOV operation
ref_80140 = ref_79129 # MOV operation
ref_80142 = (ref_80128 >> ((ref_80140 & 0xFF) & 0x3F)) # SHR operation
ref_80243 = ref_80142 # MOV operation
ref_80255 = ref_77538 # MOV operation
ref_80257 = (ref_80255 | ref_80243) # OR operation
ref_80358 = ref_80257 # MOV operation
ref_80370 = ref_74700 # MOV operation
ref_80372 = ((ref_80358 << ((ref_80370 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation
ref_80473 = ref_80372 # MOV operation
ref_80485 = ref_72327 # MOV operation
ref_80487 = (ref_80485 | ref_80473) # OR operation
ref_81342 = ref_80487 # MOV operation
ref_81553 = ref_81342 # MOV operation
ref_81555 = ref_81553 # MOV operation
print ref_81555 & 0xffffffffffffffff
|
normal
|
{
"blob_id": "22d3ff0fca9a5537da37bfbc968d83ec6f919752",
"index": 5162,
"step-1": "#!/usr/bin/env python2\n## -*- coding: utf-8 -*-\n\nimport sys\n\ndef sx(bits, value):\n sign_bit = 1 << (bits - 1)\n return (value & (sign_bit - 1)) - (value & sign_bit)\n\nSymVar_0 = int(sys.argv[1])\nref_263 = SymVar_0\nref_278 = ref_263 # MOV operation\nref_5710 = ref_278 # MOV operation\nref_5786 = ref_5710 # MOV operation\nref_5800 = (0x1F02C962 | ref_5786) # OR operation\nref_5901 = ref_5800 # MOV operation\nref_5915 = (0x1F8797B2 & ref_5901) # AND operation\nref_6846 = ref_5915 # MOV operation\nref_7764 = ref_6846 # MOV operation\nref_8577 = ref_278 # MOV operation\nref_8653 = ref_8577 # MOV operation\nref_8665 = ref_7764 # MOV operation\nref_8667 = (ref_8665 & ref_8653) # AND operation\nref_9598 = ref_8667 # MOV operation\nref_10431 = ref_278 # MOV operation\nref_10631 = ref_10431 # MOV operation\nref_10637 = (((sx(0x40, 0x66AF1DF) * sx(0x40, ref_10631)) & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) & 0xFFFFFFFFFFFFFFFF) # IMUL operation\nref_11673 = ref_9598 # MOV operation\nref_11749 = ref_11673 # MOV operation\nref_11763 = ((ref_11749 << (0x39 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_12802 = ref_9598 # MOV operation\nref_12878 = ref_12802 # MOV operation\nref_12892 = (ref_12878 >> (0x7 & 0x3F)) # SHR operation\nref_12993 = ref_12892 # MOV operation\nref_13005 = ref_11763 # MOV operation\nref_13007 = (ref_13005 | ref_12993) # OR operation\nref_13116 = ref_10637 # MOV operation\nref_13120 = ref_13007 # MOV operation\nref_13122 = ((ref_13120 + ref_13116) & 0xFFFFFFFFFFFFFFFF) # ADD operation\nref_14054 = ref_13122 # MOV operation\nref_22590 = ref_14054 # MOV operation\nref_23808 = ref_14054 # MOV operation\nref_23892 = ref_22590 # MOV operation\nref_23896 = ref_23808 # MOV operation\nref_23898 = ((ref_23896 + ref_23892) & 0xFFFFFFFFFFFFFFFF) # ADD operation\nref_24830 = ref_23898 # MOV operation\nref_26068 = ref_9598 # MOV operation\nref_26144 = ref_26068 # MOV operation\nref_26158 = (0x7 & ref_26144) # AND operation\nref_26259 = ref_26158 # MOV operation\nref_26273 = ((ref_26259 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_27516 = ref_14054 # MOV operation\nref_27592 = ref_27516 # MOV operation\nref_27604 = ref_26273 # MOV operation\nref_27606 = (ref_27604 | ref_27592) # OR operation\nref_28857 = ref_27606 # MOV operation\nref_28859 = ((ref_28857 >> 56) & 0xFF) # Byte reference - MOV operation\nref_28860 = ((ref_28857 >> 48) & 0xFF) # Byte reference - MOV operation\nref_28861 = ((ref_28857 >> 40) & 0xFF) # Byte reference - MOV operation\nref_28862 = ((ref_28857 >> 32) & 0xFF) # Byte reference - MOV operation\nref_28863 = ((ref_28857 >> 24) & 0xFF) # Byte reference - MOV operation\nref_28864 = ((ref_28857 >> 16) & 0xFF) # Byte reference - MOV operation\nref_28865 = ((ref_28857 >> 8) & 0xFF) # Byte reference - MOV operation\nref_28866 = (ref_28857 & 0xFF) # Byte reference - MOV operation\nref_30829 = ref_28859 # MOVZX operation\nref_30905 = (ref_30829 & 0xFF) # MOVZX operation\nref_34489 = ref_28866 # MOVZX operation\nref_34565 = (ref_34489 & 0xFF) # MOVZX operation\nref_34567 = (ref_34565 & 0xFF) # Byte reference - MOV operation\nref_36529 = (ref_30905 & 0xFF) # MOVZX operation\nref_36605 = (ref_36529 & 0xFF) # MOVZX operation\nref_36607 = (ref_36605 & 0xFF) # Byte reference - MOV operation\nref_37835 = ref_9598 # MOV operation\nref_39053 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation\nref_39129 = ref_39053 # MOV operation\nref_39141 = 
ref_37835 # MOV operation\nref_39143 = (ref_39141 & ref_39129) # AND operation\nref_39244 = ref_39143 # MOV operation\nref_39258 = (0x1F & ref_39244) # AND operation\nref_39359 = ref_39258 # MOV operation\nref_39373 = ((ref_39359 << (0x4 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_40296 = ref_6846 # MOV operation\nref_40372 = ref_40296 # MOV operation\nref_40384 = ref_39373 # MOV operation\nref_40386 = (ref_40384 | ref_40372) # OR operation\nref_41317 = ref_40386 # MOV operation\nref_43860 = ref_24830 # MOV operation\nref_45078 = ref_24830 # MOV operation\nref_45162 = ref_43860 # MOV operation\nref_45166 = ref_45078 # MOV operation\nref_45168 = ((ref_45166 + ref_45162) & 0xFFFFFFFFFFFFFFFF) # ADD operation\nref_46100 = ref_45168 # MOV operation\nref_47338 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation\nref_47414 = ref_47338 # MOV operation\nref_47428 = (0x7 & ref_47414) # AND operation\nref_47529 = ref_47428 # MOV operation\nref_47543 = ((ref_47529 << (0x2 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_48786 = ref_46100 # MOV operation\nref_48862 = ref_48786 # MOV operation\nref_48874 = ref_47543 # MOV operation\nref_48876 = (ref_48874 | ref_48862) # OR operation\nref_50127 = ref_48876 # MOV operation\nref_50129 = ((ref_50127 >> 56) & 0xFF) # Byte reference - MOV operation\nref_50130 = ((ref_50127 >> 48) & 0xFF) # Byte reference - MOV operation\nref_50131 = ((ref_50127 >> 40) & 0xFF) # Byte reference - MOV operation\nref_50132 = ((ref_50127 >> 32) & 0xFF) # Byte reference - MOV operation\nref_50133 = ((ref_50127 >> 24) & 0xFF) # Byte reference - MOV operation\nref_50134 = ((ref_50127 >> 16) & 0xFF) # Byte reference - MOV operation\nref_50135 = ((ref_50127 >> 8) & 0xFF) # Byte reference - MOV operation\nref_50136 = (ref_50127 & 0xFF) # Byte reference - MOV operation\nref_52099 = ref_50129 # MOVZX operation\nref_52175 = (ref_52099 & 0xFF) # MOVZX operation\nref_55759 = ref_50136 # MOVZX operation\nref_55835 = (ref_55759 & 0xFF) # MOVZX operation\nref_55837 = (ref_55835 & 0xFF) # Byte reference - MOV operation\nref_57799 = (ref_52175 & 0xFF) # MOVZX operation\nref_57875 = (ref_57799 & 0xFF) # MOVZX operation\nref_57877 = (ref_57875 & 0xFF) # Byte reference - MOV operation\nref_59105 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation\nref_60323 = ((((((((ref_55837) << 8 | ref_50130) << 8 | ref_50131) << 8 | ref_50132) << 8 | ref_50133) << 8 | ref_50134) << 8 | ref_50135) << 8 | ref_57877) # MOV operation\nref_60399 = ref_60323 # MOV operation\nref_60411 = ref_59105 # MOV operation\nref_60413 = (ref_60411 & ref_60399) # AND operation\nref_60514 = ref_60413 # MOV operation\nref_60528 = (0x1F & ref_60514) # AND operation\nref_60629 = ref_60528 # MOV operation\nref_60643 = ((ref_60629 << (0x4 & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_61566 = ref_41317 # MOV operation\nref_61642 = ref_61566 # MOV operation\nref_61654 = ref_60643 # MOV operation\nref_61656 = (ref_61654 | ref_61642) # OR operation\nref_62587 = ref_61656 # MOV operation\nref_65203 = ((((((((ref_55837) << 8 | ref_50130) << 8 | ref_50131) << 8 | ref_50132) << 8 | ref_50133) << 8 | ref_50134) << 8 | ref_50135) << 8 | ref_57877) # MOV operation\nref_66101 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | 
ref_36607) # MOV operation\nref_66177 = ref_66101 # MOV operation\nref_66189 = ref_65203 # MOV operation\nref_66191 = (ref_66189 | ref_66177) # OR operation\nref_66292 = ref_66191 # MOV operation\nref_66306 = (0xF & ref_66292) # AND operation\nref_66407 = ref_66306 # MOV operation\nref_66421 = (0x1 | ref_66407) # OR operation\nref_66650 = ref_66421 # MOV operation\nref_66652 = ((0x40 - ref_66650) & 0xFFFFFFFFFFFFFFFF) # SUB operation\nref_66660 = ref_66652 # MOV operation\nref_67926 = ref_9598 # MOV operation\nref_68002 = ref_67926 # MOV operation\nref_68016 = (ref_68002 >> (0x1 & 0x3F)) # SHR operation\nref_68117 = ref_68016 # MOV operation\nref_68131 = (0xF & ref_68117) # AND operation\nref_68232 = ref_68131 # MOV operation\nref_68246 = (0x1 | ref_68232) # OR operation\nref_68475 = ref_68246 # MOV operation\nref_68477 = ((0x40 - ref_68475) & 0xFFFFFFFFFFFFFFFF) # SUB operation\nref_68485 = ref_68477 # MOV operation\nref_69403 = ref_62587 # MOV operation\nref_69479 = ref_69403 # MOV operation\nref_69491 = ref_68485 # MOV operation\nref_69493 = ((ref_69479 << ((ref_69491 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_70764 = ref_9598 # MOV operation\nref_70840 = ref_70764 # MOV operation\nref_70854 = (ref_70840 >> (0x1 & 0x3F)) # SHR operation\nref_70955 = ref_70854 # MOV operation\nref_70969 = (0xF & ref_70955) # AND operation\nref_71070 = ref_70969 # MOV operation\nref_71084 = (0x1 | ref_71070) # OR operation\nref_72007 = ref_62587 # MOV operation\nref_72083 = ref_72007 # MOV operation\nref_72095 = ref_71084 # MOV operation\nref_72097 = (ref_72083 >> ((ref_72095 & 0xFF) & 0x3F)) # SHR operation\nref_72198 = ref_72097 # MOV operation\nref_72210 = ref_69493 # MOV operation\nref_72212 = (ref_72210 | ref_72198) # OR operation\nref_72313 = ref_72212 # MOV operation\nref_72325 = ref_66660 # MOV operation\nref_72327 = (ref_72313 >> ((ref_72325 & 0xFF) & 0x3F)) # SHR operation\nref_73482 = ((((((((ref_55837) << 8 | ref_50130) << 8 | ref_50131) << 8 | ref_50132) << 8 | ref_50133) << 8 | ref_50134) << 8 | ref_50135) << 8 | ref_57877) # MOV operation\nref_74380 = ((((((((ref_34567) << 8 | ref_28860) << 8 | ref_28861) << 8 | ref_28862) << 8 | ref_28863) << 8 | ref_28864) << 8 | ref_28865) << 8 | ref_36607) # MOV operation\nref_74456 = ref_74380 # MOV operation\nref_74468 = ref_73482 # MOV operation\nref_74470 = (ref_74468 | ref_74456) # OR operation\nref_74571 = ref_74470 # MOV operation\nref_74585 = (0xF & ref_74571) # AND operation\nref_74686 = ref_74585 # MOV operation\nref_74700 = (0x1 | ref_74686) # OR operation\nref_75971 = ref_9598 # MOV operation\nref_76047 = ref_75971 # MOV operation\nref_76061 = (ref_76047 >> (0x1 & 0x3F)) # SHR operation\nref_76162 = ref_76061 # MOV operation\nref_76176 = (0xF & ref_76162) # AND operation\nref_76277 = ref_76176 # MOV operation\nref_76291 = (0x1 | ref_76277) # OR operation\nref_76520 = ref_76291 # MOV operation\nref_76522 = ((0x40 - ref_76520) & 0xFFFFFFFFFFFFFFFF) # SUB operation\nref_76530 = ref_76522 # MOV operation\nref_77448 = ref_62587 # MOV operation\nref_77524 = ref_77448 # MOV operation\nref_77536 = ref_76530 # MOV operation\nref_77538 = ((ref_77524 << ((ref_77536 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_78809 = ref_9598 # MOV operation\nref_78885 = ref_78809 # MOV operation\nref_78899 = (ref_78885 >> (0x1 & 0x3F)) # SHR operation\nref_79000 = ref_78899 # MOV operation\nref_79014 = (0xF & ref_79000) # AND operation\nref_79115 = ref_79014 # MOV operation\nref_79129 = (0x1 | ref_79115) # OR operation\nref_80052 = 
ref_62587 # MOV operation\nref_80128 = ref_80052 # MOV operation\nref_80140 = ref_79129 # MOV operation\nref_80142 = (ref_80128 >> ((ref_80140 & 0xFF) & 0x3F)) # SHR operation\nref_80243 = ref_80142 # MOV operation\nref_80255 = ref_77538 # MOV operation\nref_80257 = (ref_80255 | ref_80243) # OR operation\nref_80358 = ref_80257 # MOV operation\nref_80370 = ref_74700 # MOV operation\nref_80372 = ((ref_80358 << ((ref_80370 & 0xFF) & 0x3F)) & 0xFFFFFFFFFFFFFFFF) # SHL operation\nref_80473 = ref_80372 # MOV operation\nref_80485 = ref_72327 # MOV operation\nref_80487 = (ref_80485 | ref_80473) # OR operation\nref_81342 = ref_80487 # MOV operation\nref_81553 = ref_81342 # MOV operation\nref_81555 = ref_81553 # MOV operation\n\nprint ref_81555 & 0xffffffffffffffff\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import io
import time
import multiprocessing as mp
from queue import Empty
import picamera
from PIL import Image
from http import server
import socketserver
import numpy as np
import cv2
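# Multi-process pipeline: a picamera capture process feeds H.264 and MJPEG frames
# into single-slot multiprocessing queues, a detection process runs a pedestrian
# model on the MJPEG frames, and an HTTP server streams both formats to clients.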
class QueueOutputMJPEG(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if buf.startswith(b'\xff\xd8'):
# New frame, put the last frame's data in the queue
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
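# Like QueueOutputMJPEG, but forwards every buffered chunk as-is (no JPEG SOI framing).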
class QueueOutputH264(object):
def __init__(self, queue, finished):
self.queue = queue
self.finished = finished
self.stream = io.BytesIO()
def write(self, buf):
if True:
size = self.stream.tell()
if size:
self.stream.seek(0)
if self.queue.empty():
self.queue.put(self.stream.read(size))
self.stream.seek(0)
self.stream.write(buf)
def flush(self):
self.queue.close()
self.queue.join_thread()
self.finished.set()
def do_capture(queueH264, queueMJPEG, stopCap):
print('Capture started')
with picamera.PiCamera(sensor_mode=2) as camera:
camera.resolution=(1280, 720)
camera.framerate=15
camera.video_stabilization = True
camera.video_denoise = True
camera.vflip = True
camera.sharpness = 20
camera.meter_mode = 'matrix'
camera.awb_mode = 'auto'
camera.saturation = 2
camera.contrast = 10
camera.drc_strength = 'high'
camera.exposure_mode = 'antishake'
camera.exposure_compensation = 3
outputH264 = QueueOutputH264(queueH264, stopCap)
outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)
camera.start_recording(outputH264, format='h264', profile='high', intra_period=30, sps_timing=True, bitrate=4000000, quality=25, resize=(420,234))
camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg', resize=(672,384))
while not stopCap.wait(0): #camera.wait_recording(100)
pass
camera.stop_recording(splitter_port=2)
camera.stop_recording()
time.sleep(0.2)
camera.close()
def do_detection(ImageQueue, RectQueue, finished):
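    # Load the OpenVINO pedestrian-detection IR model and run inference on an
    # Intel Movidius (MYRIAD) stick through OpenCV's DNN module.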
net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml', 'pedestrian-detection-adas-002.bin')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
st = time.monotonic()
cnt = 1
fps = 0
FutureOuts = []
ospid = os.getpid()
while not finished.wait(0):
stream = None
        try:
            stream = io.BytesIO(ImageQueue.get(False))
        except Empty:
            pass
if len(FutureOuts) == 3:
stream = None
        if stream is not None:
stream.seek(0)
            try:
                image = Image.open(stream).convert('RGB')
            except Exception:
                continue  # skip undecodable frames; `image` would otherwise be undefined below
cv_img = np.array(image)
cv_img = cv_img[:, :, ::-1].copy()
blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672,384),\
mean=(127.5, 127.5, 127.5), swapRB=False, crop=False)
net.setInput(blob)
FutureOuts.append(net.forwardAsync())
while FutureOuts and FutureOuts[0].wait_for(0):
out1 = FutureOuts[0].get()
if cnt >= 20:
fps = cnt/(time.monotonic() - st)
st = time.monotonic()
cnt = 1
                print('%d: Detecting at %dFPS' % (ospid, fps))
else:
cnt += 1
props = []
for detection in out1.reshape(-1,7):
inf = []
obj_type = int(detection[1]-1)
conf = float(detection[2])
xmin = float(detection[3])
ymin = float(detection[4])
xmax = float(detection[5])
ymax = float(detection[6])
if conf > 0.6:
prop = {'coord': (xmin, ymin, xmax, ymax), 'type': obj_type, 'conf': conf}
props.append(prop)
if RectQueue.empty():
RectQueue.put(props)
del FutureOuts[0]
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if '/data.html' in self.path:
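            # "ffffd9" / "ffaaee" appear to be ad-hoc markers framing the payload for the client parser.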
strprops = "ffffd9"
if not self.server.DetectQueue.empty():
props = self.server.DetectQueue.get(False)
pcnt = 0
for prop in props:
                    strprops += 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}). ID = {4:d}\n'.format(
                        prop['coord'][0], prop['coord'][1], prop['coord'][2], prop['coord'][3], pcnt)
pcnt += 1
strprops += "ffaaee"
content = strprops.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif '/stream.mjpg' in self.path:
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
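            # Busy-wait until the capture process has produced the first frame.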
while self.server.MJPEGQueue.empty():
pass
buf = io.BytesIO(self.server.MJPEGQueue.get())
try:
st = time.monotonic()
cnt = 1
fps = 0
ospid = os.getpid()
while True:
if not self.server.MJPEGQueue.empty():
buf = io.BytesIO(self.server.MJPEGQueue.get(False))
if cnt >= 20:
fps = cnt/(time.monotonic() - st)
st = time.monotonic()
cnt = 1
print('%d: Streaming MJPEG at %dFPS' % (ospid, fps))
else:
cnt += 1
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(buf.getvalue()))
self.end_headers()
self.wfile.write(buf.getvalue())
                    self.wfile.write(b'\r\n')  # CRLF terminates the multipart frame
except Exception as e:
print('Removed streaming clients from MJPEG %s: %s', self.client_address, str(e))
else:
#self.send_response(200)
#self.send_header('Age', 0)
#self.send_header('Cache-Control', 'no-cache, private')
#self.send_header('Pragma', 'no-cache')
#self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
#self.end_headers()
try:
st2 = time.monotonic()
cnt2 = 1
fps2 = 0
ospid2 = os.getpid()
while True:
if not self.server.H264Queue.empty():
frame = io.BytesIO(self.server.H264Queue.get(False))
buf = frame
if cnt2 >= 20:
fps2 = cnt2/(time.monotonic() - st2)
st2 = time.monotonic()
cnt2 = 1
print('%d: Streaming H264 at %dFPS' % (ospid2, fps2))
else:
cnt2 += 1
self.wfile.write(buf.getvalue())
#self.wfile.write(b'\r\r')
except Exception as e:
print('Removed streaming clients from H264 %s: %s', self.client_address, str(e))
# else:
# self.send_error(404)
# self.end_headers()
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
def server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):
try:
address = ('', port)
server = StreamingServer(address, StreamingHandler)
server.MJPEGQueue = MJPEGQueue
server.DetectQueue = DetectQueue
server.H264Queue = H264Queue
print('Started server')
server.serve_forever()
finally:
servstop.set()
if __name__ == '__main__':
queueH264 = mp.Queue(1)
queueMJPEG = mp.Queue(1)
queueDetectRect = mp.Queue(1)
stopCapture = mp.Event()
queueProcessedLow = mp.Queue(1)
queueProcessedHigh = mp.Queue(1)
ServerStop = mp.Event()
capture_proc = mp.Process(target=do_capture, args=(queueH264, queueMJPEG, stopCapture), daemon=True)
server_proc = mp.Process(target=server_start, args=(queueMJPEG, queueH264, queueDetectRect, 8000, stopCapture), daemon=True)
detect_proc = mp.Process(target=do_detection, args=(queueMJPEG, queueDetectRect, stopCapture), daemon=True)
capture_proc.start()
detect_proc.start()
server_proc.start()
    while True:
        if stopCapture.is_set():
            time.sleep(0.1)
            capture_proc.terminate()
            server_proc.terminate()
            detect_proc.terminate()
            break
        time.sleep(1)
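# Endpoints served on port 8000: /stream.mjpg (multipart MJPEG), /data.html
# (latest detection results), any other path (raw H.264 elementary stream).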
|
normal
|
{
"blob_id": "ffd034eb5f0482c027dcc344bddb01b90249511c",
"index": 3198,
"step-1": "<mask token>\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\n<mask token>\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\n<mask token>\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = '', port\n server = StreamingServer(address, StreamingHandler)\n 
server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\ndef do_capture(queueH264, queueMJPEG, stopCap):\n print('Capture started')\n with picamera.PiCamera(sensor_mode=2) as camera:\n camera.resolution = 1280, 720\n camera.framerate = 15\n camera.video_stabilization = True\n camera.video_denoise = True\n camera.vflip = True\n camera.sharpness = 20\n camera.meter_mode = 'matrix'\n camera.awb_mode = 'auto'\n camera.saturation = 2\n camera.contrast = 10\n camera.drc_strength = 'high'\n camera.exposure_mode = 'antishake'\n camera.exposure_compensation = 3\n outputH264 = QueueOutputH264(queueH264, stopCap)\n outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)\n camera.start_recording(outputH264, format='h264', profile='high',\n intra_period=30, sps_timing=True, bitrate=4000000, quality=25,\n resize=(420, 234))\n camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg',\n resize=(672, 384))\n while not stopCap.wait(0):\n pass\n camera.stop_recording(splitter_port=2)\n camera.stop_recording()\n time.sleep(0.2)\n camera.close()\n\n\ndef do_detection(ImageQueue, RectQueue, finished):\n net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml',\n 'pedestrian-detection-adas-002.bin')\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n st = time.monotonic()\n cnt = 1\n fps = 0\n FutureOuts = []\n ospid = os.getpid()\n while not finished.wait(0):\n stream = None\n try:\n stream = io.BytesIO(ImageQueue.get(False))\n except:\n pass\n if len(FutureOuts) == 3:\n stream = None\n if not stream is None:\n stream.seek(0)\n try:\n image = Image.open(stream).convert('RGB')\n except:\n pass\n cv_img = np.array(image)\n cv_img = cv_img[:, :, ::-1].copy()\n blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672, 384), mean\n =(127.5, 127.5, 127.5), swapRB=False, crop=False)\n net.setInput(blob)\n FutureOuts.append(net.forwardAsync())\n while FutureOuts and FutureOuts[0].wait_for(0):\n out1 = FutureOuts[0].get()\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Detecting at %FPS' % (ospid, fps))\n else:\n cnt += 1\n props = []\n for detection in out1.reshape(-1, 7):\n inf = []\n obj_type = int(detection[1] - 1)\n conf = float(detection[2])\n xmin = float(detection[3])\n ymin = float(detection[4])\n xmax = float(detection[5])\n ymax = float(detection[6])\n if conf > 0.6:\n prop = {'coord': (xmin, ymin, xmax, ymax), 'type':\n obj_type, 'conf': conf}\n props.append(prop)\n if RectQueue.empty():\n RectQueue.put(props)\n del FutureOuts[0]\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 
'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = '', port\n server = StreamingServer(address, StreamingHandler)\n server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\n\nif __name__ == '__main__':\n queueH264 = mp.Queue(1)\n queueMJPEG = mp.Queue(1)\n queueDetectRect = mp.Queue(1)\n stopCapture = mp.Event()\n queueProcessedLow = mp.Queue(1)\n queueProcessedHigh = mp.Queue(1)\n ServerStop = mp.Event()\n capture_proc = mp.Process(target=do_capture, args=(queueH264,\n queueMJPEG, stopCapture), daemon=True)\n server_proc = mp.Process(target=server_start, args=(queueMJPEG,\n queueH264, queueDetectRect, 8000, stopCapture), daemon=True)\n detect_proc = mp.Process(target=do_detection, args=(queueMJPEG,\n queueDetectRect, stopCapture), daemon=True)\n capture_proc.start()\n detect_proc.start()\n server_proc.start()\n while True:\n if stopCapture.is_set():\n stopCapture.set()\n time.sleep(0.1)\n capture_proc.terminate()\n server_proc.terminate()\n detect_proc.terminate()\n proccessing_proc_lores.terminate()\n break\n time.sleep(1)\n",
"step-4": "import os\nimport io\nimport time\nimport multiprocessing as mp\nfrom queue import Empty\nimport picamera\nfrom PIL import Image\nfrom http import server\nimport socketserver\nimport numpy as np\nimport cv2\n\n\nclass QueueOutputMJPEG(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\nclass QueueOutputH264(object):\n\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\n\ndef do_capture(queueH264, queueMJPEG, stopCap):\n print('Capture started')\n with picamera.PiCamera(sensor_mode=2) as camera:\n camera.resolution = 1280, 720\n camera.framerate = 15\n camera.video_stabilization = True\n camera.video_denoise = True\n camera.vflip = True\n camera.sharpness = 20\n camera.meter_mode = 'matrix'\n camera.awb_mode = 'auto'\n camera.saturation = 2\n camera.contrast = 10\n camera.drc_strength = 'high'\n camera.exposure_mode = 'antishake'\n camera.exposure_compensation = 3\n outputH264 = QueueOutputH264(queueH264, stopCap)\n outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)\n camera.start_recording(outputH264, format='h264', profile='high',\n intra_period=30, sps_timing=True, bitrate=4000000, quality=25,\n resize=(420, 234))\n camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg',\n resize=(672, 384))\n while not stopCap.wait(0):\n pass\n camera.stop_recording(splitter_port=2)\n camera.stop_recording()\n time.sleep(0.2)\n camera.close()\n\n\ndef do_detection(ImageQueue, RectQueue, finished):\n net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml',\n 'pedestrian-detection-adas-002.bin')\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n st = time.monotonic()\n cnt = 1\n fps = 0\n FutureOuts = []\n ospid = os.getpid()\n while not finished.wait(0):\n stream = None\n try:\n stream = io.BytesIO(ImageQueue.get(False))\n except:\n pass\n if len(FutureOuts) == 3:\n stream = None\n if not stream is None:\n stream.seek(0)\n try:\n image = Image.open(stream).convert('RGB')\n except:\n pass\n cv_img = np.array(image)\n cv_img = cv_img[:, :, ::-1].copy()\n blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672, 384), mean\n =(127.5, 127.5, 127.5), swapRB=False, crop=False)\n net.setInput(blob)\n FutureOuts.append(net.forwardAsync())\n while FutureOuts and FutureOuts[0].wait_for(0):\n out1 = FutureOuts[0].get()\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Detecting at %FPS' % (ospid, fps))\n else:\n cnt += 1\n props = []\n for detection in out1.reshape(-1, 7):\n inf = []\n obj_type = int(detection[1] - 1)\n conf = float(detection[2])\n xmin = float(detection[3])\n ymin = float(detection[4])\n xmax = float(detection[5])\n ymax = float(detection[6])\n if conf > 0.6:\n prop = {'coord': (xmin, ymin, xmax, ymax), 'type':\n obj_type, 'conf': conf}\n props.append(prop)\n 
if RectQueue.empty():\n RectQueue.put(props)\n del FutureOuts[0]\n\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = 'ffffd9'\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += (\n 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'\n .format(prop['coord'][0], prop['coord'][1], prop[\n 'coord'][2], prop['coord'][3], pcnt))\n pcnt += 1\n strprops += 'ffaaee'\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type',\n 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt / (time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps)\n )\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.\n client_address, str(e))\n else:\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2 / (time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2,\n fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.\n client_address, str(e))\n\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = '', port\n server = StreamingServer(address, StreamingHandler)\n server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\n\nif __name__ == '__main__':\n queueH264 = mp.Queue(1)\n queueMJPEG = mp.Queue(1)\n queueDetectRect = mp.Queue(1)\n stopCapture = mp.Event()\n queueProcessedLow = mp.Queue(1)\n queueProcessedHigh = mp.Queue(1)\n ServerStop = mp.Event()\n capture_proc = mp.Process(target=do_capture, args=(queueH264,\n queueMJPEG, stopCapture), daemon=True)\n server_proc = mp.Process(target=server_start, args=(queueMJPEG,\n queueH264, queueDetectRect, 8000, stopCapture), daemon=True)\n detect_proc = mp.Process(target=do_detection, args=(queueMJPEG,\n queueDetectRect, stopCapture), daemon=True)\n capture_proc.start()\n detect_proc.start()\n server_proc.start()\n while True:\n if stopCapture.is_set():\n stopCapture.set()\n time.sleep(0.1)\n 
capture_proc.terminate()\n server_proc.terminate()\n detect_proc.terminate()\n proccessing_proc_lores.terminate()\n break\n time.sleep(1)\n",
"step-5": "import os\nimport io\nimport time\nimport multiprocessing as mp\nfrom queue import Empty\nimport picamera\nfrom PIL import Image\nfrom http import server\nimport socketserver\nimport numpy as np\nimport cv2\n\nclass QueueOutputMJPEG(object):\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n # New frame, put the last frame's data in the queue\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\nclass QueueOutputH264(object):\n def __init__(self, queue, finished):\n self.queue = queue\n self.finished = finished\n self.stream = io.BytesIO()\n\n def write(self, buf):\n if True:\n size = self.stream.tell()\n if size:\n self.stream.seek(0)\n if self.queue.empty():\n self.queue.put(self.stream.read(size))\n self.stream.seek(0)\n self.stream.write(buf)\n\n def flush(self):\n self.queue.close()\n self.queue.join_thread()\n self.finished.set()\n\ndef do_capture(queueH264, queueMJPEG, stopCap):\n print('Capture started')\n with picamera.PiCamera(sensor_mode=2) as camera:\n camera.resolution=(1280, 720)\n camera.framerate=15\n camera.video_stabilization = True\n camera.video_denoise = True\n camera.vflip = True\n camera.sharpness = 20\n camera.meter_mode = 'matrix'\n camera.awb_mode = 'auto'\n camera.saturation = 2\n camera.contrast = 10\n camera.drc_strength = 'high'\n camera.exposure_mode = 'antishake'\n camera.exposure_compensation = 3\n outputH264 = QueueOutputH264(queueH264, stopCap)\n outputMJPEG = QueueOutputMJPEG(queueMJPEG, stopCap)\n camera.start_recording(outputH264, format='h264', profile='high', intra_period=30, sps_timing=True, bitrate=4000000, quality=25, resize=(420,234))\n camera.start_recording(outputMJPEG, splitter_port=2, format='mjpeg', resize=(672,384))\n while not stopCap.wait(0): #camera.wait_recording(100)\n pass\n camera.stop_recording(splitter_port=2)\n camera.stop_recording()\n time.sleep(0.2)\n camera.close()\n\ndef do_detection(ImageQueue, RectQueue, finished):\n net = cv2.dnn.readNet('pedestrian-detection-adas-002.xml', 'pedestrian-detection-adas-002.bin')\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)\n st = time.monotonic()\n cnt = 1\n fps = 0\n FutureOuts = []\n ospid = os.getpid()\n while not finished.wait(0):\n stream = None\n try:\n stream = io.BytesIO(ImageQueue.get(False))\n except:\n pass\n if len(FutureOuts) == 3:\n stream = None\n if not stream is None:\n stream.seek(0)\n try:\n image = Image.open(stream).convert('RGB')\n except:\n pass\n cv_img = np.array(image)\n cv_img = cv_img[:, :, ::-1].copy()\n blob = cv2.dnn.blobFromImage(cv_img, 1.0, size=(672,384),\\\n mean=(127.5, 127.5, 127.5), swapRB=False, crop=False)\n net.setInput(blob)\n FutureOuts.append(net.forwardAsync())\n while FutureOuts and FutureOuts[0].wait_for(0):\n out1 = FutureOuts[0].get()\n if cnt >= 20:\n fps = cnt/(time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Detecting at %FPS' % (ospid, fps))\n else:\n cnt += 1\n props = []\n for detection in out1.reshape(-1,7):\n inf = []\n obj_type = int(detection[1]-1)\n conf = float(detection[2])\n xmin = float(detection[3])\n ymin = float(detection[4])\n xmax = float(detection[5])\n ymax = float(detection[6])\n if conf > 0.6:\n prop = {'coord': (xmin, ymin, xmax, ymax), 
'type': obj_type, 'conf': conf}\n props.append(prop)\n if RectQueue.empty():\n RectQueue.put(props)\n del FutureOuts[0]\n\nclass StreamingHandler(server.BaseHTTPRequestHandler):\n def do_GET(self):\n if '/data.html' in self.path:\n strprops = \"ffffd9\"\n if not self.server.DetectQueue.empty():\n props = self.server.DetectQueue.get(False)\n pcnt = 0\n for prop in props:\n strprops += 'Coord = ({0:4f}, {1:4f}, {2:4f}, {3:4f}. ID = {4:d}\\n'.format(\n prop['coord'][0], prop['coord'][1], prop['coord'][2], prop['coord'][3], pcnt)\n pcnt += 1\n strprops += \"ffaaee\"\n content = strprops.encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(content))\n self.end_headers()\n self.wfile.write(content)\n elif '/stream.mjpg' in self.path:\n self.send_response(200)\n self.send_header('Age', 0)\n self.send_header('Cache-Control', 'no-cache, private')\n self.send_header('Pragma', 'no-cache')\n self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')\n self.end_headers()\n while self.server.MJPEGQueue.empty():\n pass\n buf = io.BytesIO(self.server.MJPEGQueue.get())\n try:\n st = time.monotonic()\n cnt = 1\n fps = 0\n ospid = os.getpid()\n while True:\n if not self.server.MJPEGQueue.empty():\n buf = io.BytesIO(self.server.MJPEGQueue.get(False))\n if cnt >= 20:\n fps = cnt/(time.monotonic() - st)\n st = time.monotonic()\n cnt = 1\n print('%d: Streaming MJPEG at %dFPS' % (ospid, fps))\n else:\n cnt += 1\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buf.getvalue()))\n self.end_headers()\n self.wfile.write(buf.getvalue())\n self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from MJPEG %s: %s', self.client_address, str(e))\n else:\n #self.send_response(200)\n #self.send_header('Age', 0)\n #self.send_header('Cache-Control', 'no-cache, private')\n #self.send_header('Pragma', 'no-cache')\n #self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')\n #self.end_headers()\n try:\n st2 = time.monotonic()\n cnt2 = 1\n fps2 = 0\n ospid2 = os.getpid()\n while True:\n if not self.server.H264Queue.empty():\n frame = io.BytesIO(self.server.H264Queue.get(False))\n buf = frame\n if cnt2 >= 20:\n fps2 = cnt2/(time.monotonic() - st2)\n st2 = time.monotonic()\n cnt2 = 1\n print('%d: Streaming H264 at %dFPS' % (ospid2, fps2))\n else:\n cnt2 += 1\n self.wfile.write(buf.getvalue())\n #self.wfile.write(b'\\r\\r')\n except Exception as e:\n print('Removed streaming clients from H264 %s: %s', self.client_address, str(e))\n # else:\n # self.send_error(404)\n # self.end_headers()\n\nclass StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):\n allow_reuse_address = True\n daemon_threads = True\n\ndef server_start(MJPEGQueue, H264Queue, DetectQueue, port, servstop):\n try:\n address = ('', port)\n server = StreamingServer(address, StreamingHandler)\n server.MJPEGQueue = MJPEGQueue\n server.DetectQueue = DetectQueue\n server.H264Queue = H264Queue\n print('Started server')\n server.serve_forever()\n finally:\n servstop.set()\n\nif __name__ == '__main__':\n queueH264 = mp.Queue(1)\n queueMJPEG = mp.Queue(1)\n queueDetectRect = mp.Queue(1)\n stopCapture = mp.Event()\n queueProcessedLow = mp.Queue(1)\n queueProcessedHigh = mp.Queue(1)\n ServerStop = mp.Event()\n capture_proc = mp.Process(target=do_capture, args=(queueH264, queueMJPEG, stopCapture), daemon=True)\n server_proc = 
mp.Process(target=server_start, args=(queueMJPEG, queueH264, queueDetectRect, 8000, stopCapture), daemon=True)\n detect_proc = mp.Process(target=do_detection, args=(queueMJPEG, queueDetectRect, stopCapture), daemon=True)\n\n capture_proc.start()\n detect_proc.start()\n server_proc.start()\n\n while True:\n if stopCapture.is_set():\n stopCapture.set()\n time.sleep(0.1)\n capture_proc.terminate()\n server_proc.terminate()\n detect_proc.terminate()\n proccessing_proc_lores.terminate()\n break\n time.sleep(1)\n\n",
"step-ids": [
12,
13,
16,
17,
18
]
}
|
[
12,
13,
16,
17,
18
] |
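# Robot on a grid: S is a move string over L/R/U/D with wildcards '?', T selects
# max (T == 1) or min final distance. B is the Manhattan distance forced by the
# fixed moves; each '?' adds 1 when maximizing and cancels 1 (down to parity)
# when minimizing.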
S = input()
T = int(input())
B = abs(S.count('L') - S.count('R')) + abs(S.count('U') - S.count('D'))
Q = S.count('?')
print(B + Q if T == 1 else max(B - Q, (B - Q) % 2))
|
normal
|
{
"blob_id": "ce263424b856c07e04bd66cda7ebda646583b1fe",
"index": 5962,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count(\n '?')) % 2))\n",
"step-3": "S = input()\nT = int(input())\nB = abs(S.count('L') - S.count('R')) + abs(S.count('U') - S.count('D'))\nprint(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count(\n '?')) % 2))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Differences between Python 2 and Python 3
print "hello world"
# become
print("hello world") # in Pyton 3
raw_input('What is your name?')
# become
input('What is your name?') # in Python 3
# the language of Python
# Reserved words
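# (Python 3 keyword list, shown for reference; this block is notes, not runnable code)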
and
as
assert
break
class
continue
def
del
elif
else
except
finally
for
from
global
if
import
in
is
lambda
nonlocal
not
or
pass
raise
return
try
while
with
yield
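# The interpreter can list its own keywords programmatically:
#   import keyword
#   print(keyword.kwlist)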
# Section 1.2
|
normal
|
{
"blob_id": "40471bfcf05ef45fbb070bbb5bfd4c425fe59b1c",
"index": 7523,
"step-1": "# Differences between Python 2 and Python 3\n\nprint \"hello world\" \n # become \nprint(\"hello world\") # in Pyton 3\n\nraw_input('What is your name?') \n# become\ninput('What is your name?') # in Python 3\n\n\n# the language of Python \n# Reserved words\nand\nas\nassert\nbreak\nclass\ncontinue\ndef\ndel\nelif\nelse\nexcept\nfinally\nfor\nfrom\nglobal\nif\nimport\nin\nis\nlambda\nnonlocal\nnot\nor \npass\nraise\nreturn\ntry\nwhile\nwith\nyield\n\n# Section 1.2 ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
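# scikit-learn demo: fit an SVM classifier on all but the last digit image,
# then predict that held-out image and display it.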
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
clf.fit(X, y)
<|reserved_special_token_0|>
print('prediction = ' + str(prediction) + ', actual = ' + str(actual))
plt.matshow(digits.images[-1])
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
digits = datasets.load_digits()
X, y = digits.data[:-1], digits.target[:-1]
clf = svm.SVC(gamma=0.1, C=100)
clf.fit(X, y)
prediction = clf.predict(digits.data[-1:])
actual = digits.target[-1:]
print('prediction = ' + str(prediction) + ', actual = ' + str(actual))
plt.matshow(digits.images[-1])
plt.show()
<|reserved_special_token_1|>
from sklearn import datasets, svm
import matplotlib.pyplot as plt
digits = datasets.load_digits()
X, y = digits.data[:-1], digits.target[:-1]
clf = svm.SVC(gamma=0.1, C=100)
clf.fit(X, y)
prediction = clf.predict(digits.data[-1:])
actual = digits.target[-1:]
print('prediction = ' + str(prediction) + ', actual = ' + str(actual))
plt.matshow(digits.images[-1])
plt.show()
<|reserved_special_token_1|>
from sklearn import datasets, svm
import matplotlib.pyplot as plt
digits = datasets.load_digits()
X, y = digits.data[:-1], digits.target[:-1]
clf = svm.SVC(gamma=0.1, C=100)
clf.fit(X, y)
prediction = clf.predict(digits.data[-1:])
actual = digits.target[-1:]
print("prediction = " + str(prediction) + ", actual = " + str(actual))
plt.matshow(digits.images[-1])
plt.show()
|
flexible
|
{
"blob_id": "0d98472d1c04bfc52378aa6401a47d96582696a2",
"index": 4046,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nclf.fit(X, y)\n<mask token>\nprint('prediction = ' + str(prediction) + ', actual = ' + str(actual))\nplt.matshow(digits.images[-1])\nplt.show()\n",
"step-3": "<mask token>\ndigits = datasets.load_digits()\nX, y = digits.data[:-1], digits.target[:-1]\nclf = svm.SVC(gamma=0.1, C=100)\nclf.fit(X, y)\nprediction = clf.predict(digits.data[-1:])\nactual = digits.target[-1:]\nprint('prediction = ' + str(prediction) + ', actual = ' + str(actual))\nplt.matshow(digits.images[-1])\nplt.show()\n",
"step-4": "from sklearn import datasets, svm\nimport matplotlib.pyplot as plt\ndigits = datasets.load_digits()\nX, y = digits.data[:-1], digits.target[:-1]\nclf = svm.SVC(gamma=0.1, C=100)\nclf.fit(X, y)\nprediction = clf.predict(digits.data[-1:])\nactual = digits.target[-1:]\nprint('prediction = ' + str(prediction) + ', actual = ' + str(actual))\nplt.matshow(digits.images[-1])\nplt.show()\n",
"step-5": "from sklearn import datasets, svm\nimport matplotlib.pyplot as plt\n\ndigits = datasets.load_digits()\nX, y = digits.data[:-1], digits.target[:-1]\n\nclf = svm.SVC(gamma=0.1, C=100)\nclf.fit(X, y)\n\nprediction = clf.predict(digits.data[-1:])\nactual = digits.target[-1:]\nprint(\"prediction = \" + str(prediction) + \", actual = \" + str(actual))\n\nplt.matshow(digits.images[-1])\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import queue
import copy
import heapq
import sys
sys.setrecursionlimit(100000)
dx = [1, 0, 0, -1]
dy = [0, 1, -1, 0]
class PriorityQueue:
    def __init__(self):
        # Per-instance state; the original class-level mutables were shared
        # by every PriorityQueue instance.
        self.pq = []
        self.elements = {}
        self.task = 0
    def insert(self, priority, x_val, y_val):
        entry = [priority, self.task, x_val, y_val]
        self.elements[self.task] = entry
        heapq.heappush(self.pq, entry)
        self.task += 1
    def delete(self, task):
        # Lazy deletion: mark the entry; pop() skips marked entries.
        entry = self.elements.pop(task)
        entry[-1] = None
    def pop(self):
        while self.pq:
            priority, task, x_val, y_val = heapq.heappop(self.pq)
            if y_val is not None:  # skip lazily deleted entries
                del self.elements[task]
                return priority, x_val, y_val
        raise KeyError('Pop from an empty Priority Queue')
    def size(self):
        return len(self.elements)
def text_write(where , out_list,ans,row,col):
f = open( where + "_output.txt", 'w')
for i in range(1,row+1):
for j in range(1,col+1):
data ="%d " %out_list[i][j]
f.write(data)
f.write("\n")
f.write("---\n")
data2 = "length = %d\n" %ans[0]
f.write(data2)
data3 = "time = %d" %ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open("./input/" + where+".txt" , 'r')
line = f.readline()
line = line.replace("\n", "")
result = line.split(" ")
a=[int(result[0]),int(result[1]),int(result[2])]
return a
def text_read(where,row,col):
f = open("./input/"+where+".txt", 'r')
line = f.readline()
list1 = [[0 for cols in range(col + 1)] for rows in range(row + 1)]
a = 1
line2 = f.readline()
while line2:
line2 = line2.replace("\n", "")
result2 = line2.split(" ")
for v in range(col):
list1[a][v + 1] = int(result2[v])
line2 = f.readline()
a += 1
f.close()
return list1
def position_check(pos , out_list , row , col):
for r in range(1,row+1):
for c in range(1,col+1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c] == 4:
pos.append([r,c])
return pos
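
# text_info(), text_read() and position_check() together imply the input
# layout; a hypothetical ./input/test1.txt (the grid itself is illustrative,
# only the cell codes are taken from the script):
#
#   1 5 7          <- header: <id> <rows> <cols>; the script uses rows/cols
#   1 1 1 1 1 1 1
#   1 3 0 0 0 0 1
#   1 0 1 1 6 0 1
#   1 0 0 0 4 0 1
#   1 1 1 1 1 1 1
#
# cell codes: 1 = wall, 0 = free; 3, 6 and 4 are the marker cells collected
# by position_check() in scan order (pos[0] -> pos[1] -> pos[2]); 5 marks
# the found path in the output file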
def bfs(start ,end, out_list , row , col , ans):
des = [[0 for c in range(col+1)] for r in range(row+1)]
visit = [[0 for c in range(col+1)] for r in range(row+1)]
q = queue.Queue()
q.put(start)
    visit[start[0]][start[1]] = 1
    des[start[0]][start[1]] = 0
ans[1] +=1
while not q.empty():
if visit[end[0]][end[1]] ==1:
break
cur_task = q.get()
x=cur_task[0]
y=cur_task[1]
for k in range (4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <=row and ny >=1 and ny<=col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] =1
des[nx][ny] = des[x][y] +1
q.put([nx,ny])
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0],end[1]]
for n in range(num,0,-1):
tx=target[0]
ty=target[1]
out_list[tx][ty]=5
for k in range(4):
ntx=tx+dx[k]
nty=ty+dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n-1:
target=[ntx,nty]
return out_list
def IDS(start , end , out_list , row , col , ans):
des = [[0 for c in range(col + 1)] for r in range(row + 1)]
find=[0]
limit = 0
while find[0] != 1:
limit +=1
visit = [[0 for c in range(col + 1)] for r in range(row + 1)]
        des[start[0]][start[1]] = 0
        visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num=limit
target = [end[0],end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start , end , out_list , row , col ,ans , limit,des,visit,find):
if visit[end[0]][end[1]] == 1:
find[0]=1
return
x=start[0]
y=start[1]
for k in range(4):
nx = x+dx[k]
ny=y+dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
if des[x][y]+1 <=limit:
visit[nx][ny]=1
des[nx][ny] = des[x][y]+1
next_start=[nx,ny]
ans[1]+=1
dfs(next_start , end , out_list , row , col , ans , limit, des , visit,find)
def astar(start , end , out_list , row , col , ans):
des = [[0 for c in range(col + 1)] for r in range(row + 1)]
visit = [[0 for c in range(col + 1)] for r in range(row + 1)]
    visit[start[0]][start[1]] = 1
    des[start[0]][start[1]] = 0
    # a fresh PriorityQueue instance starts empty, so no draining is needed
    pq2 = PriorityQueue()
manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])
pq2.insert(manhattan_d,start[0],start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny]=1
des[nx][ny]=des[x_val][y_val]+1
d=abs(nx-end[0])+abs(ny-end[1])+des[nx][ny]
pq2.insert(d,nx,ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num,0,-1):
tx=target[0]
ty=target[1]
out_list[tx][ty]=5
for k in range(4):
ntx=tx+dx[k]
nty=ty+dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n-1:
target=[ntx,nty]
return out_list
def greedy(start , end , out_list , row , col , ans):
des = [[0 for c in range(col + 1)] for r in range(row + 1)]
visit = [[0 for c in range(col + 1)] for r in range(row + 1)]
    visit[start[0]][start[1]] = 1
    des[start[0]][start[1]] = 0
    # a fresh PriorityQueue instance starts empty, so no draining is needed
    pq2 = PriorityQueue()
manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])
pq2.insert(manhattan_d,start[0],start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny]=1
des[nx][ny]=des[x_val][y_val]+1
d=abs(nx-end[0])+abs(ny-end[1])
pq2.insert(d,nx,ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num,0,-1):
tx=target[0]
ty=target[1]
out_list[tx][ty]=5
for k in range(4):
ntx=tx+dx[k]
nty=ty+dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n-1:
target=[ntx,nty]
return out_list
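
# astar() and greedy() above differ only in the priority pushed onto the
# frontier; pulled out as hypothetical helpers (these names do not appear
# elsewhere in the script), the contrast is:
def astar_priority(nx, ny, end, des):
    # f = g + h: cost of the path walked so far plus the Manhattan estimate;
    # with this admissible heuristic, A* returns a shortest path
    return des[nx][ny] + abs(nx - end[0]) + abs(ny - end[1])

def greedy_priority(nx, ny, end, des):
    # f = h only: expands fewer cells in practice, but the path found is
    # not guaranteed to be the shortest
    return abs(nx - end[0]) + abs(ny - end[1])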
def test_floor():
where = "test1"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where , row , col)
pos = []
pos = position_check(pos,out_list, row , col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans=[0,0]
#path1=bfs(pos[0],pos[1],deepcopy_copy1,row,col,ans)
#path2=bfs(pos[1],pos[2],deepcopy_copy2,row,col,ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0],pos[1],deepcopy_copy1,row,col,ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0],pos[1],deepcopy_copy1,row,col,ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def fifth_floor():
where = "fifth_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def fourth_floor():
where = "fourth_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
        for j in range(1, col + 1): # if we leave out one col... seems like it'll work lol
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = "third_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
        for j in range(1, col + 1): # if we leave out one col... seems like it'll work lol
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = "second_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = "first_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
        for j in range(1, col + 1): # if we leave out one col... seems like it'll work lol
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
#test_floor()
fifth_floor()
fourth_floor()
third_floor()
second_floor()
first_floor()
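
# the six floor functions above are identical except for the input name, and
# switching search algorithms means editing the commented calls in each one;
# a hypothetical consolidation (run_floor does not exist in the script):
def run_floor(where, search=greedy):
    # 'search' can be any of bfs, IDS, astar or greedy - they share a signature
    info = text_info(where)
    row, col = info[1], info[2]
    out_list = text_read(where, row, col)
    pos = position_check([], out_list, row, col)
    ans = [0, 0]
    path1 = search(pos[0], pos[1], copy.deepcopy(out_list), row, col, ans)
    path2 = search(pos[1], pos[2], copy.deepcopy(out_list), row, col, ans)
    for i in range(1, row):
        for j in range(1, col + 1):
            if path1[i][j] == 5 or path2[i][j] == 5:
                out_list[i][j] = 5
    text_write(where, out_list, ans, row, col)

# e.g. run_floor('fifth_floor', search=astar)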
|
normal
|
{
"blob_id": "6192099bdecffd9ce3576f4034567478145115a0",
"index": 1291,
"step-1": "<mask token>\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\n<mask token>\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\n<mask token>\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n 
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\n<mask token>\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\ndef text_info(where):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n line = line.replace('\\n', '')\n result = line.split(' ')\n a = [int(result[0]), int(result[1]), int(result[2])]\n return a\n\n\n<mask token>\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\n<mask token>\n\n\ndef IDS(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n find = [0]\n limit = 0\n while find[0] != 1:\n limit += 1\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0\n visit[start[0]][start[1]] = 1\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n ans[0] += limit\n num = limit\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += 
des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\n<mask token>\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = 'third_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef second_floor():\n where = 'second_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef first_floor():\n where = 'first_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = 
text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\ndef text_info(where):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n line = line.replace('\\n', '')\n result = line.split(' ')\n a = [int(result[0]), int(result[1]), int(result[2])]\n return a\n\n\ndef text_read(where, row, col):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]\n a = 1\n line2 = f.readline()\n while line2:\n line2 = line2.replace('\\n', '')\n result2 = line2.split(' ')\n for v in range(col):\n list1[a][v + 1] = int(result2[v])\n line2 = f.readline()\n a += 1\n f.close()\n return list1\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\n<mask token>\n\n\ndef IDS(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n find = [0]\n limit = 0\n while find[0] != 1:\n limit += 1\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0\n visit[start[0]][start[1]] = 1\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n ans[0] += limit\n num = limit\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n 
break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\n<mask token>\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = 'third_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef second_floor():\n where = 'second_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], 
deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef first_floor():\n where = 'first_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.setrecursionlimit(100000)\ndx = [1, 0, 0, -1]\ndy = [0, 1, -1, 0]\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\ndef text_info(where):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n line = line.replace('\\n', '')\n result = line.split(' ')\n a = [int(result[0]), int(result[1]), int(result[2])]\n return a\n\n\ndef text_read(where, row, col):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]\n a = 1\n line2 = f.readline()\n while line2:\n line2 = line2.replace('\\n', '')\n result2 = line2.split(' ')\n for v in range(col):\n list1[a][v + 1] = int(result2[v])\n line2 = f.readline()\n a += 1\n f.close()\n return list1\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\ndef bfs(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n q = queue.Queue()\n q.put(start)\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n ans[1] += 1\n while not q.empty():\n if visit[end[0]][end[1]] == 1:\n break\n cur_task = q.get()\n x = cur_task[0]\n y = cur_task[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n q.put([nx, ny])\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef IDS(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n find = [0]\n limit = 0\n while find[0] != 1:\n limit += 1\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0\n visit[start[0]][start[1]] = 1\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n ans[0] += limit\n num = limit\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty 
<= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef test_floor():\n where = 'test1'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in 
range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef fifth_floor():\n where = 'fifth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = 'third_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef second_floor():\n where = 'second_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef first_floor():\n where = 'first_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\nfifth_floor()\nforth_floor()\nthird_floor()\nsecond_floor()\nfirst_floor()\n",
"step-5": "import queue\nimport copy\nimport heapq\nimport sys\nsys.setrecursionlimit(100000)\n\ndx =[1,0,0,-1]\ndy=[0,1,-1,0]\n\nclass PriorityQueue:\n pq=[]\n elements={}\n task=0\n\n def insert(self , priority,x_val,y_val):\n entry = [priority, self.task,x_val,y_val]\n self.elements[self.task]=entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self,task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val , y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val , y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\n\ndef text_write(where , out_list,ans,row,col):\n f = open( where + \"_output.txt\", 'w')\n\n for i in range(1,row+1):\n for j in range(1,col+1):\n data =\"%d \" %out_list[i][j]\n f.write(data)\n f.write(\"\\n\")\n f.write(\"---\\n\")\n data2 = \"length = %d\\n\" %ans[0]\n f.write(data2)\n data3 = \"time = %d\" %ans[1]\n f.write(data3)\n f.close()\n\n\n\ndef text_info(where):\n f = open(\"./input/\" + where+\".txt\" , 'r')\n line = f.readline()\n line = line.replace(\"\\n\", \"\")\n result = line.split(\" \")\n a=[int(result[0]),int(result[1]),int(result[2])]\n return a\n\ndef text_read(where,row,col):\n f = open(\"./input/\"+where+\".txt\", 'r')\n line = f.readline()\n list1 = [[0 for cols in range(col + 1)] for rows in range(row + 1)]\n a = 1\n line2 = f.readline()\n while line2:\n line2 = line2.replace(\"\\n\", \"\")\n result2 = line2.split(\" \")\n for v in range(col):\n list1[a][v + 1] = int(result2[v])\n line2 = f.readline()\n a += 1\n f.close()\n return list1\n\ndef position_check(pos , out_list , row , col):\n for r in range(1,row+1):\n for c in range(1,col+1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c] == 4:\n pos.append([r,c])\n return pos\n\ndef bfs(start ,end, out_list , row , col , ans):\n des = [[0 for c in range(col+1)] for r in range(row+1)]\n visit = [[0 for c in range(col+1)] for r in range(row+1)]\n q = queue.Queue()\n q.put(start)\n visit[start[0]][start[1]]=1;\n des[start[0]][start[1]]=0;\n ans[1] +=1\n while not q.empty():\n if visit[end[0]][end[1]] ==1:\n break\n cur_task = q.get()\n x=cur_task[0]\n y=cur_task[1]\n for k in range (4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <=row and ny >=1 and ny<=col and out_list[nx][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] =1\n des[nx][ny] = des[x][y] +1\n q.put([nx,ny])\n ans[1] += 1\n\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0],end[1]]\n\n for n in range(num,0,-1):\n tx=target[0]\n ty=target[1]\n out_list[tx][ty]=5\n for k in range(4):\n ntx=tx+dx[k]\n nty=ty+dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:\n if des[ntx][nty] == n-1:\n target=[ntx,nty]\n return out_list\n\ndef IDS(start , end , out_list , row , col , ans):\n des = [[0 for c in range(col + 1)] for r in range(row + 1)]\n find=[0]\n limit = 0\n while find[0] != 1:\n limit +=1\n visit = [[0 for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0;\n visit[start[0]][start[1]] = 1\n\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n\n ans[0] += limit\n num=limit\n target = [end[0],end[1]]\n\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and 
out_list[ntx][nty] != 1:\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n\n return out_list\n\ndef dfs(start , end , out_list , row , col ,ans , limit,des,visit,find):\n if visit[end[0]][end[1]] == 1:\n find[0]=1\n return\n x=start[0]\n y=start[1]\n for k in range(4):\n nx = x+dx[k]\n ny=y+dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:\n if visit[nx][ny] != 1:\n if des[x][y]+1 <=limit:\n visit[nx][ny]=1\n des[nx][ny] = des[x][y]+1\n next_start=[nx,ny]\n ans[1]+=1\n dfs(next_start , end , out_list , row , col , ans , limit, des , visit,find)\n\ndef astar(start , end , out_list , row , col , ans):\n des = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1;\n des[start[0]][start[1]] = 0;\n\n pq2 = PriorityQueue()\n while pq2.size() !=0:\n pq2.pop()\n manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])\n pq2.insert(manhattan_d,start[0],start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny]=1\n des[nx][ny]=des[x_val][y_val]+1\n d=abs(nx-end[0])+abs(ny-end[1])+des[nx][ny]\n pq2.insert(d,nx,ny)\n ans[1] += 1\n\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n\n for n in range(num,0,-1):\n tx=target[0]\n ty=target[1]\n out_list[tx][ty]=5\n for k in range(4):\n ntx=tx+dx[k]\n nty=ty+dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:\n if des[ntx][nty] == n-1:\n target=[ntx,nty]\n return out_list\n\ndef greedy(start , end , out_list , row , col , ans):\n des = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1;\n des[start[0]][start[1]] = 0;\n\n pq2 = PriorityQueue()\n while pq2.size() !=0:\n pq2.pop()\n manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])\n pq2.insert(manhattan_d,start[0],start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny]=1\n des[nx][ny]=des[x_val][y_val]+1\n d=abs(nx-end[0])+abs(ny-end[1])\n pq2.insert(d,nx,ny)\n ans[1] += 1\n\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n\n for n in range(num,0,-1):\n tx=target[0]\n ty=target[1]\n out_list[tx][ty]=5\n for k in range(4):\n ntx=tx+dx[k]\n nty=ty+dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:\n if des[ntx][nty] == n-1:\n target=[ntx,nty]\n return out_list\n\n\ndef test_floor():\n where = \"test1\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where , row , col)\n pos = []\n pos = position_check(pos,out_list, row , col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans=[0,0]\n\n #path1=bfs(pos[0],pos[1],deepcopy_copy1,row,col,ans)\n #path2=bfs(pos[1],pos[2],deepcopy_copy2,row,col,ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0],pos[1],deepcopy_copy1,row,col,ans)\n 
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0],pos[1],deepcopy_copy1,row,col,ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n\ndef fifth_floor():\n where = \"fifth_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n\ndef forth_floor():\n where = \"fourth_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1): # col 한개 안하면... 뭔가 될듯 ㅋㅋ\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = \"third_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1): # col 한개 안하면... 
뭔가 될듯 ㅋㅋ\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\ndef second_floor():\n where = \"second_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\ndef first_floor():\n where = \"first_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1): # col 한개 안하면... 뭔가 될듯 ㅋㅋ\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n#test_floor()\nfifth_floor()\nforth_floor()\nthird_floor()\nsecond_floor()\nfirst_floor()\n\n",
"step-ids": [
12,
17,
18,
23,
25
]
}
|
[
12,
17,
18,
23,
25
] |
# -*- coding: utf-8 -*-
from django.test import TestCase
from ..printer import Printer
class TestSunlumoProjectPrinter(TestCase):
def test_printer(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
tmpFile = '/tmp/printtmp'
sl_prj.printToPdf({
'tmpFile': tmpFile, 'layout': 'test_layout',
'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines', 'points'],
'transparencies': [50, 0, 0]
})
with open(tmpFile + '.pdf', 'rb') as pdfFile:
            # we just want to test that the PDF file is not blank
data = pdfFile.read()
self.assertEqual(len(data), 426652)
def test_printer_missing_required_params(self):
sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
with self.assertRaises(RuntimeError):
sl_prj.printToPdf({})
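
    # hypothetical variant: asserting an exact byte count (426652) ties the
    # test to one renderer build, and PDF bytes are rarely stable across
    # versions; checking the "%PDF" magic header is a less brittle sketch
    def test_printer_produces_valid_pdf(self):
        sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')
        tmpFile = '/tmp/printtmp'
        sl_prj.printToPdf({
            'tmpFile': tmpFile, 'layout': 'test_layout',
            'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines', 'points'],
            'transparencies': [50, 0, 0]
        })
        with open(tmpFile + '.pdf', 'rb') as pdfFile:
            data = pdfFile.read()
        self.assertTrue(data.startswith(b'%PDF'))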
|
normal
|
{
"blob_id": "5e0cba6952cdc677c640a0df325426ffc89189cd",
"index": 658,
"step-1": "<mask token>\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',\n 'points'], 'transparencies': [50, 0, 0]})\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',\n 'points'], 'transparencies': [50, 0, 0]})\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n\n def test_printer_missing_required_params(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n with self.assertRaises(RuntimeError):\n sl_prj.printToPdf({})\n",
"step-4": "from django.test import TestCase\nfrom ..printer import Printer\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines',\n 'points'], 'transparencies': [50, 0, 0]})\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n\n def test_printer_missing_required_params(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n with self.assertRaises(RuntimeError):\n sl_prj.printToPdf({})\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\n\nfrom ..printer import Printer\n\n\nclass TestSunlumoProjectPrinter(TestCase):\n def test_printer(self):\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n\n tmpFile = '/tmp/printtmp'\n sl_prj.printToPdf({\n 'tmpFile': tmpFile, 'layout': 'test_layout',\n 'bbox': [-2, -2, 2, 2], 'layers': ['polygons', 'lines', 'points'],\n 'transparencies': [50, 0, 0]\n })\n\n with open(tmpFile + '.pdf', 'rb') as pdfFile:\n # we just want to test if the PDF file in not blank\n data = pdfFile.read()\n self.assertEqual(len(data), 426652)\n\n def test_printer_missing_required_params(self):\n\n sl_prj = Printer('./sunlumo_mapserver/test_data/test_sunlumo.qgs')\n\n with self.assertRaises(RuntimeError):\n sl_prj.printToPdf({})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
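The comment in the row above says the test only wants to prove the PDF is not blank, yet it asserts one exact byte count (426652), which ties the test to a single renderer build. A less brittle check, sketched as a standalone helper under the same file-layout assumptions (the helper name is hypothetical):

def assert_pdf_not_blank(path):
    with open(path, 'rb') as pdf_file:
        data = pdf_file.read()
    # Every well-formed PDF begins with the '%PDF' magic bytes.
    assert data.startswith(b'%PDF'), 'not a PDF file'
    assert len(data) > 1024, 'suspiciously small PDF'
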
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with ADBConnection('a95x01', domain='dohmens.local') as conn:
print(conn.conn)
<|reserved_special_token_1|>
from adb_local_installer.connection import ADBConnection
with ADBConnection('a95x01', domain='dohmens.local') as conn:
print(conn.conn)
<|reserved_special_token_1|>
from adb_local_installer.connection import ADBConnection
with ADBConnection("a95x01", domain="dohmens.local") as conn:
print(conn.conn)
|
flexible
|
{
"blob_id": "6f583fde0eeab84984629b795e428300503a49c9",
"index": 9852,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith ADBConnection('a95x01', domain='dohmens.local') as conn:\n print(conn.conn)\n",
"step-3": "from adb_local_installer.connection import ADBConnection\nwith ADBConnection('a95x01', domain='dohmens.local') as conn:\n print(conn.conn)\n",
"step-4": "from adb_local_installer.connection import ADBConnection\n\n\nwith ADBConnection(\"a95x01\", domain=\"dohmens.local\") as conn:\n print(conn.conn)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
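ADBConnection's implementation is not shown in this row; the with-statement usage only implies the standard context-manager contract. A hypothetical minimal equivalent of that pattern, where the class name, attributes, and string handle are assumptions rather than the library's API:

class DeviceConnection:
    def __init__(self, host, domain=None):
        self.address = f'{host}.{domain}' if domain else host
        self.conn = None

    def __enter__(self):
        self.conn = f'connected:{self.address}'  # stand-in for a real ADB handle
        return self

    def __exit__(self, exc_type, exc, tb):
        self.conn = None  # always release the handle, even on error
        return False      # do not swallow exceptions

with DeviceConnection('a95x01', domain='dohmens.local') as conn:
    print(conn.conn)
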
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '/path/to/mm-api/python')
sys.path.insert(0, '/path/to/mm-api/distrib/python_osx')
print(sys.path)
<|reserved_special_token_0|>
remote.connect()
<|reserved_special_token_0|>
remote.shutdown()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '/path/to/mm-api/python')
sys.path.insert(0, '/path/to/mm-api/distrib/python_osx')
print(sys.path)
<|reserved_special_token_0|>
examples_dir = '/dir/of/models/'
part_filename1 = os.path.join(examples_dir, 'model1.stl')
part_filename2 = os.path.join(examples_dir, 'model2.stl')
remote = mmRemote()
remote.connect()
cmd = mmapi.StoredCommands()
new_obj1 = mm.append_objects_from_file(remote, part_filename1)
new_obj1 = mm.append_objects_from_file(remote, part_filename2)
remote.shutdown()
<|reserved_special_token_1|>
import os
import sys
sys.path.insert(0, '/path/to/mm-api/python')
sys.path.insert(0, '/path/to/mm-api/distrib/python_osx')
print(sys.path)
import mmapi
from mmRemote import *
import mm
examples_dir = '/dir/of/models/'
part_filename1 = os.path.join(examples_dir, 'model1.stl')
part_filename2 = os.path.join(examples_dir, 'model2.stl')
remote = mmRemote()
remote.connect()
cmd = mmapi.StoredCommands()
new_obj1 = mm.append_objects_from_file(remote, part_filename1)
new_obj1 = mm.append_objects_from_file(remote, part_filename2)
remote.shutdown()
<|reserved_special_token_1|>
import os
import sys
sys.path.insert(0, "/path/to/mm-api/python")
sys.path.insert(0, "/path/to/mm-api/distrib/python_osx")
print(sys.path)
import mmapi
from mmRemote import *
import mm;
# assumption: we are running
examples_dir = "/dir/of/models/"
part_filename1 = os.path.join( examples_dir, "model1.stl" )
part_filename2 = os.path.join( examples_dir, "model2.stl" )
# initialize connection
remote = mmRemote()
remote.connect()
cmd = mmapi.StoredCommands()
new_obj1 = mm.append_objects_from_file(remote, part_filename1);
new_obj1 = mm.append_objects_from_file(remote, part_filename2);
#done!
remote.shutdown()
|
flexible
|
{
"blob_id": "bf6d1ddf66bc0d54320c0491e344925a5f507df7",
"index": 861,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, '/path/to/mm-api/python')\nsys.path.insert(0, '/path/to/mm-api/distrib/python_osx')\nprint(sys.path)\n<mask token>\nremote.connect()\n<mask token>\nremote.shutdown()\n",
"step-3": "<mask token>\nsys.path.insert(0, '/path/to/mm-api/python')\nsys.path.insert(0, '/path/to/mm-api/distrib/python_osx')\nprint(sys.path)\n<mask token>\nexamples_dir = '/dir/of/models/'\npart_filename1 = os.path.join(examples_dir, 'model1.stl')\npart_filename2 = os.path.join(examples_dir, 'model2.stl')\nremote = mmRemote()\nremote.connect()\ncmd = mmapi.StoredCommands()\nnew_obj1 = mm.append_objects_from_file(remote, part_filename1)\nnew_obj1 = mm.append_objects_from_file(remote, part_filename2)\nremote.shutdown()\n",
"step-4": "import os\nimport sys\nsys.path.insert(0, '/path/to/mm-api/python')\nsys.path.insert(0, '/path/to/mm-api/distrib/python_osx')\nprint(sys.path)\nimport mmapi\nfrom mmRemote import *\nimport mm\nexamples_dir = '/dir/of/models/'\npart_filename1 = os.path.join(examples_dir, 'model1.stl')\npart_filename2 = os.path.join(examples_dir, 'model2.stl')\nremote = mmRemote()\nremote.connect()\ncmd = mmapi.StoredCommands()\nnew_obj1 = mm.append_objects_from_file(remote, part_filename1)\nnew_obj1 = mm.append_objects_from_file(remote, part_filename2)\nremote.shutdown()\n",
"step-5": "import os\nimport sys\nsys.path.insert(0, \"/path/to/mm-api/python\")\nsys.path.insert(0, \"/path/to/mm-api/distrib/python_osx\")\nprint(sys.path)\n\n\nimport mmapi\nfrom mmRemote import *\nimport mm;\n\n# assumption: we are running\nexamples_dir = \"/dir/of/models/\"\npart_filename1 = os.path.join( examples_dir, \"model1.stl\" )\npart_filename2 = os.path.join( examples_dir, \"model2.stl\" )\n\n# initialize connection\nremote = mmRemote()\nremote.connect()\n\ncmd = mmapi.StoredCommands()\n\n\nnew_obj1 = mm.append_objects_from_file(remote, part_filename1);\nnew_obj1 = mm.append_objects_from_file(remote, part_filename2);\n\n#done!\nremote.shutdown()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
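One thing worth noting about the row above: remote.shutdown() is skipped whenever either append raises, leaving the Meshmixer link open. A sketch of the same flow with the connection released unconditionally; the placeholder paths and mm-api imports are taken from the row itself, so this only runs where mm-api is installed:

import sys
sys.path.insert(0, '/path/to/mm-api/python')
sys.path.insert(0, '/path/to/mm-api/distrib/python_osx')

import mmapi
from mmRemote import *
import mm

remote = mmRemote()
remote.connect()
try:
    obj1 = mm.append_objects_from_file(remote, 'model1.stl')
    obj2 = mm.append_objects_from_file(remote, 'model2.stl')
finally:
    remote.shutdown()  # runs even if an append fails
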
<|reserved_special_token_0|>
def run_train(model_name, base_trainer, X, y):
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)
trainer = RSACVTrainer(cv, base_trainer)
trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)
models = trainer.get_model()
dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')
oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)
target = trainer.get_cv_targets()
metric = accuracy_score(target, oof)
return trainer, metric
def eval_single_model(trainer, data, target):
pred = np.array(trainer.predict(data)).T
ensembler = SimpleAgerageEnsember()
pred_avg = ensembler.predict(pred)
pred_avg = np.where(pred_avg > 0.5, 1, 0)
score = accuracy_score(target, pred_avg)
return score
def save_importance(model_name, trainer):
name, mean_importance, std_importance = trainer.get_importance()
fig = plot_importance(name, mean_importance, std_importance)
fig.savefig(f'../data/titanic/working/{model_name}_importance.png')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_train(model_name, base_trainer, X, y):
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)
trainer = RSACVTrainer(cv, base_trainer)
trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)
models = trainer.get_model()
dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')
oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)
target = trainer.get_cv_targets()
metric = accuracy_score(target, oof)
return trainer, metric
def eval_single_model(trainer, data, target):
pred = np.array(trainer.predict(data)).T
ensembler = SimpleAgerageEnsember()
pred_avg = ensembler.predict(pred)
pred_avg = np.where(pred_avg > 0.5, 1, 0)
score = accuracy_score(target, pred_avg)
return score
def save_importance(model_name, trainer):
name, mean_importance, std_importance = trainer.get_importance()
fig = plot_importance(name, mean_importance, std_importance)
fig.savefig(f'../data/titanic/working/{model_name}_importance.png')
def main():
src_dir = '../data/titanic/feature/'
feature_files = config.FeatureList.features
feature_files = [os.path.join(src_dir, f'{filename}.pkl') for filename in
feature_files]
X = load_feature(feature_files)
y = load_pickle(os.path.join(src_dir, 'target.pkl'))
print(X.head())
print(y.head())
X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2,
random_state=config.SEED, stratify=y)
cv_metrics = {}
eval_metrics = {}
base_trainers = {'LGBM': LGBMTrainer(config.LightgbmParams.params,
config.LightgbmParams.train_params)}
fit_trainers = {}
for model_name, base_trainer in base_trainers.items():
trainer, metric = run_train(model_name, base_trainer, X_train, y_train)
fit_trainers[model_name] = trainer
cv_metrics[model_name] = metric
eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)
save_importance(model_name, trainer)
pred_first = []
for model_name, _trainer in fit_trainers.items():
pred_first.append(np.array(_trainer.predict(X_eval)).T)
pred_first = np.concatenate(pred_first, axis=1)
pred_first = pd.DataFrame(pred_first)
base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config
.SEED))
trainer, metric = run_train('stack_ridge', base_trainer, pred_first, y_eval
)
eval_metrics['Stack'] = metric
for model_name, metric in cv_metrics.items():
print(f'{model_name:>8} CV Metric: {metric:.08f}')
for model_name, metric in eval_metrics.items():
print(f'{model_name:>8} Eval Metric: {metric:.08f}')
if config.DEBUG is not True:
writer = MlflowWriter(config.MLflowConfig.experiment_name,
tracking_uri=os.path.abspath('../mlruns'))
writer.set_run_name(config.MLflowConfig.run_name)
writer.set_note_content(config.MLflowConfig.experiment_note)
writer.log_param('Feature', ', '.join(config.FeatureList.features))
writer.log_param('SEED', config.SEED)
writer.log_param('NUM_SEED', config.NUM_SEED)
writer.log_param('LGBM_params', {'params': config.LightgbmParams.
params, 'train_params': config.LightgbmParams.train_params})
for model_name, _metric in cv_metrics.items():
writer.log_metric(f'{model_name} CV Metric', _metric)
for model_name, _metric in eval_metrics.items():
writer.log_metric(f'{model_name} Eval Metric', _metric)
writer.set_terminated()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('..')
<|reserved_special_token_0|>
def run_train(model_name, base_trainer, X, y):
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)
trainer = RSACVTrainer(cv, base_trainer)
trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)
models = trainer.get_model()
dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')
oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)
target = trainer.get_cv_targets()
metric = accuracy_score(target, oof)
return trainer, metric
def eval_single_model(trainer, data, target):
pred = np.array(trainer.predict(data)).T
ensembler = SimpleAgerageEnsember()
pred_avg = ensembler.predict(pred)
pred_avg = np.where(pred_avg > 0.5, 1, 0)
score = accuracy_score(target, pred_avg)
return score
def save_importance(model_name, trainer):
name, mean_importance, std_importance = trainer.get_importance()
fig = plot_importance(name, mean_importance, std_importance)
fig.savefig(f'../data/titanic/working/{model_name}_importance.png')
def main():
src_dir = '../data/titanic/feature/'
feature_files = config.FeatureList.features
feature_files = [os.path.join(src_dir, f'{filename}.pkl') for filename in
feature_files]
X = load_feature(feature_files)
y = load_pickle(os.path.join(src_dir, 'target.pkl'))
print(X.head())
print(y.head())
X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2,
random_state=config.SEED, stratify=y)
cv_metrics = {}
eval_metrics = {}
base_trainers = {'LGBM': LGBMTrainer(config.LightgbmParams.params,
config.LightgbmParams.train_params)}
fit_trainers = {}
for model_name, base_trainer in base_trainers.items():
trainer, metric = run_train(model_name, base_trainer, X_train, y_train)
fit_trainers[model_name] = trainer
cv_metrics[model_name] = metric
eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)
save_importance(model_name, trainer)
pred_first = []
for model_name, _trainer in fit_trainers.items():
pred_first.append(np.array(_trainer.predict(X_eval)).T)
pred_first = np.concatenate(pred_first, axis=1)
pred_first = pd.DataFrame(pred_first)
base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config
.SEED))
trainer, metric = run_train('stack_ridge', base_trainer, pred_first, y_eval
)
eval_metrics['Stack'] = metric
for model_name, metric in cv_metrics.items():
print(f'{model_name:>8} CV Metric: {metric:.08f}')
for model_name, metric in eval_metrics.items():
print(f'{model_name:>8} Eval Metric: {metric:.08f}')
if config.DEBUG is not True:
writer = MlflowWriter(config.MLflowConfig.experiment_name,
tracking_uri=os.path.abspath('../mlruns'))
writer.set_run_name(config.MLflowConfig.run_name)
writer.set_note_content(config.MLflowConfig.experiment_note)
writer.log_param('Feature', ', '.join(config.FeatureList.features))
writer.log_param('SEED', config.SEED)
writer.log_param('NUM_SEED', config.NUM_SEED)
writer.log_param('LGBM_params', {'params': config.LightgbmParams.
params, 'train_params': config.LightgbmParams.train_params})
for model_name, _metric in cv_metrics.items():
writer.log_metric(f'{model_name} CV Metric', _metric)
for model_name, _metric in eval_metrics.items():
writer.log_metric(f'{model_name} Eval Metric', _metric)
writer.set_terminated()
if __name__ == '__main__':
with timer('Train Processing'):
main()
<|reserved_special_token_1|>
import os
import sys
sys.path.append('..')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import accuracy_score
import config
from mikasa.common import timer
from mikasa.io import load_pickle, dump_pickle, load_feature
from mikasa.trainer.gbdt import LGBMTrainer
from mikasa.trainer.base import SklearnRegressionTrainer
from mikasa.trainer.cross_validation import RSACVTrainer
from mikasa.ensemble import SimpleAgerageEnsember
from mikasa.plot import plot_importance
from mikasa.mlflow_writer import MlflowWriter
def run_train(model_name, base_trainer, X, y):
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)
trainer = RSACVTrainer(cv, base_trainer)
trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)
models = trainer.get_model()
dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')
oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)
target = trainer.get_cv_targets()
metric = accuracy_score(target, oof)
return trainer, metric
def eval_single_model(trainer, data, target):
pred = np.array(trainer.predict(data)).T
ensembler = SimpleAgerageEnsember()
pred_avg = ensembler.predict(pred)
pred_avg = np.where(pred_avg > 0.5, 1, 0)
score = accuracy_score(target, pred_avg)
return score
def save_importance(model_name, trainer):
name, mean_importance, std_importance = trainer.get_importance()
fig = plot_importance(name, mean_importance, std_importance)
fig.savefig(f'../data/titanic/working/{model_name}_importance.png')
def main():
src_dir = '../data/titanic/feature/'
feature_files = config.FeatureList.features
feature_files = [os.path.join(src_dir, f'{filename}.pkl') for filename in
feature_files]
X = load_feature(feature_files)
y = load_pickle(os.path.join(src_dir, 'target.pkl'))
print(X.head())
print(y.head())
X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2,
random_state=config.SEED, stratify=y)
cv_metrics = {}
eval_metrics = {}
base_trainers = {'LGBM': LGBMTrainer(config.LightgbmParams.params,
config.LightgbmParams.train_params)}
fit_trainers = {}
for model_name, base_trainer in base_trainers.items():
trainer, metric = run_train(model_name, base_trainer, X_train, y_train)
fit_trainers[model_name] = trainer
cv_metrics[model_name] = metric
eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)
save_importance(model_name, trainer)
pred_first = []
for model_name, _trainer in fit_trainers.items():
pred_first.append(np.array(_trainer.predict(X_eval)).T)
pred_first = np.concatenate(pred_first, axis=1)
pred_first = pd.DataFrame(pred_first)
base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config
.SEED))
trainer, metric = run_train('stack_ridge', base_trainer, pred_first, y_eval
)
eval_metrics['Stack'] = metric
for model_name, metric in cv_metrics.items():
print(f'{model_name:>8} CV Metric: {metric:.08f}')
for model_name, metric in eval_metrics.items():
print(f'{model_name:>8} Eval Metric: {metric:.08f}')
if config.DEBUG is not True:
writer = MlflowWriter(config.MLflowConfig.experiment_name,
tracking_uri=os.path.abspath('../mlruns'))
writer.set_run_name(config.MLflowConfig.run_name)
writer.set_note_content(config.MLflowConfig.experiment_note)
writer.log_param('Feature', ', '.join(config.FeatureList.features))
writer.log_param('SEED', config.SEED)
writer.log_param('NUM_SEED', config.NUM_SEED)
writer.log_param('LGBM_params', {'params': config.LightgbmParams.
params, 'train_params': config.LightgbmParams.train_params})
for model_name, _metric in cv_metrics.items():
writer.log_metric(f'{model_name} CV Metric', _metric)
for model_name, _metric in eval_metrics.items():
writer.log_metric(f'{model_name} Eval Metric', _metric)
writer.set_terminated()
if __name__ == '__main__':
with timer('Train Processing'):
main()
<|reserved_special_token_1|>
import os
import sys
sys.path.append("..")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import accuracy_score
import config
from mikasa.common import timer
from mikasa.io import load_pickle, dump_pickle, load_feature
from mikasa.trainer.gbdt import LGBMTrainer
from mikasa.trainer.base import SklearnRegressionTrainer
from mikasa.trainer.cross_validation import RSACVTrainer
from mikasa.ensemble import SimpleAgerageEnsember
from mikasa.plot import plot_importance
from mikasa.mlflow_writer import MlflowWriter
def run_train(model_name, base_trainer, X, y):
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)
trainer = RSACVTrainer(cv, base_trainer)
trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)
# Save model.
models = trainer.get_model()
dump_pickle(models, f"../data/titanic/model/{model_name}_models.pkl")
# Evaluation by cv.
oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)
target = trainer.get_cv_targets()
metric = accuracy_score(target, oof)
return trainer, metric
def eval_single_model(trainer, data, target):
pred = np.array(trainer.predict(data)).T
ensembler = SimpleAgerageEnsember()
pred_avg = ensembler.predict(pred)
pred_avg = np.where(pred_avg > 0.5, 1, 0)
score = accuracy_score(target, pred_avg)
return score
def save_importance(model_name, trainer):
name, mean_importance, std_importance = trainer.get_importance()
fig = plot_importance(name, mean_importance, std_importance)
fig.savefig(f"../data/titanic/working/{model_name}_importance.png")
def main():
# Load data.
src_dir = "../data/titanic/feature/"
feature_files = config.FeatureList.features
feature_files = [
os.path.join(src_dir, f"{filename}.pkl") for filename in feature_files
]
X = load_feature(feature_files)
y = load_pickle(os.path.join(src_dir, "target.pkl"))
print(X.head())
print(y.head())
# Split data
X_train, X_eval, y_train, y_eval = train_test_split(
X, y, test_size=0.2, random_state=config.SEED, stratify=y
)
cv_metrics = {}
eval_metrics = {}
# Train model.
base_trainers = {
"LGBM": LGBMTrainer(
config.LightgbmParams.params, config.LightgbmParams.train_params
),
}
fit_trainers = {}
for model_name, base_trainer in base_trainers.items():
trainer, metric = run_train(model_name, base_trainer, X_train, y_train)
fit_trainers[model_name] = trainer
cv_metrics[model_name] = metric
eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)
save_importance(model_name, trainer)
# Stacking
pred_first = []
for model_name, _trainer in fit_trainers.items():
pred_first.append(np.array(_trainer.predict(X_eval)).T)
pred_first = np.concatenate(pred_first, axis=1)
pred_first = pd.DataFrame(pred_first)
base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config.SEED))
trainer, metric = run_train("stack_ridge", base_trainer, pred_first, y_eval)
eval_metrics["Stack"] = metric
# Evaluation
for model_name, metric in cv_metrics.items():
print(f"{model_name:>8} CV Metric: {metric:.08f}")
for model_name, metric in eval_metrics.items():
print(f"{model_name:>8} Eval Metric: {metric:.08f}")
    # Dump logs to mlflow.
if config.DEBUG is not True:
writer = MlflowWriter(
config.MLflowConfig.experiment_name,
tracking_uri=os.path.abspath("../mlruns"),
)
writer.set_run_name(config.MLflowConfig.run_name)
writer.set_note_content(config.MLflowConfig.experiment_note)
# Features
writer.log_param("Feature", ", ".join(config.FeatureList.features))
        # Parameters
writer.log_param("SEED", config.SEED)
writer.log_param("NUM_SEED", config.NUM_SEED)
writer.log_param(
"LGBM_params",
{
"params": config.LightgbmParams.params,
"train_params": config.LightgbmParams.train_params,
},
)
# Metric
for model_name, _metric in cv_metrics.items():
writer.log_metric(f"{model_name} CV Metric", _metric)
for model_name, _metric in eval_metrics.items():
writer.log_metric(f"{model_name} Eval Metric", _metric)
# Close writer client.
writer.set_terminated()
if __name__ == "__main__":
with timer("Train Processing"):
main()
|
flexible
|
{
"blob_id": "23f0ba622097eb4065337ea77ea8104a610d6857",
"index": 6317,
"step-1": "<mask token>\n\n\ndef run_train(model_name, base_trainer, X, y):\n cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)\n trainer = RSACVTrainer(cv, base_trainer)\n trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)\n models = trainer.get_model()\n dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')\n oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)\n target = trainer.get_cv_targets()\n metric = accuracy_score(target, oof)\n return trainer, metric\n\n\ndef eval_single_model(trainer, data, target):\n pred = np.array(trainer.predict(data)).T\n ensembler = SimpleAgerageEnsember()\n pred_avg = ensembler.predict(pred)\n pred_avg = np.where(pred_avg > 0.5, 1, 0)\n score = accuracy_score(target, pred_avg)\n return score\n\n\ndef save_importance(model_name, trainer):\n name, mean_importance, std_importance = trainer.get_importance()\n fig = plot_importance(name, mean_importance, std_importance)\n fig.savefig(f'../data/titanic/working/{model_name}_importance.png')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_train(model_name, base_trainer, X, y):\n cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)\n trainer = RSACVTrainer(cv, base_trainer)\n trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)\n models = trainer.get_model()\n dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')\n oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)\n target = trainer.get_cv_targets()\n metric = accuracy_score(target, oof)\n return trainer, metric\n\n\ndef eval_single_model(trainer, data, target):\n pred = np.array(trainer.predict(data)).T\n ensembler = SimpleAgerageEnsember()\n pred_avg = ensembler.predict(pred)\n pred_avg = np.where(pred_avg > 0.5, 1, 0)\n score = accuracy_score(target, pred_avg)\n return score\n\n\ndef save_importance(model_name, trainer):\n name, mean_importance, std_importance = trainer.get_importance()\n fig = plot_importance(name, mean_importance, std_importance)\n fig.savefig(f'../data/titanic/working/{model_name}_importance.png')\n\n\ndef main():\n src_dir = '../data/titanic/feature/'\n feature_files = config.FeatureList.features\n feature_files = [os.path.join(src_dir, f'{filename}.pkl') for filename in\n feature_files]\n X = load_feature(feature_files)\n y = load_pickle(os.path.join(src_dir, 'target.pkl'))\n print(X.head())\n print(y.head())\n X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2,\n random_state=config.SEED, stratify=y)\n cv_metrics = {}\n eval_metrics = {}\n base_trainers = {'LGBM': LGBMTrainer(config.LightgbmParams.params,\n config.LightgbmParams.train_params)}\n fit_trainers = {}\n for model_name, base_trainer in base_trainers.items():\n trainer, metric = run_train(model_name, base_trainer, X_train, y_train)\n fit_trainers[model_name] = trainer\n cv_metrics[model_name] = metric\n eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)\n save_importance(model_name, trainer)\n pred_first = []\n for model_name, _trainer in fit_trainers.items():\n pred_first.append(np.array(_trainer.predict(X_eval)).T)\n pred_first = np.concatenate(pred_first, axis=1)\n pred_first = pd.DataFrame(pred_first)\n base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config\n .SEED))\n trainer, metric = run_train('stack_ridge', base_trainer, pred_first, y_eval\n )\n eval_metrics['Stack'] = metric\n for model_name, metric in cv_metrics.items():\n print(f'{model_name:>8} CV Metric: {metric:.08f}')\n for model_name, metric in eval_metrics.items():\n print(f'{model_name:>8} Eval Metric: {metric:.08f}')\n if config.DEBUG is not True:\n writer = MlflowWriter(config.MLflowConfig.experiment_name,\n tracking_uri=os.path.abspath('../mlruns'))\n writer.set_run_name(config.MLflowConfig.run_name)\n writer.set_note_content(config.MLflowConfig.experiment_note)\n writer.log_param('Feature', ', '.join(config.FeatureList.features))\n writer.log_param('SEED', config.SEED)\n writer.log_param('NUM_SEED', config.NUM_SEED)\n writer.log_param('LGBM_params', {'params': config.LightgbmParams.\n params, 'train_params': config.LightgbmParams.train_params})\n for model_name, _metric in cv_metrics.items():\n writer.log_metric(f'{model_name} CV Metric', _metric)\n for model_name, _metric in eval_metrics.items():\n writer.log_metric(f'{model_name} Eval Metric', _metric)\n writer.set_terminated()\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('..')\n<mask token>\n\n\ndef run_train(model_name, base_trainer, X, y):\n cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)\n trainer = RSACVTrainer(cv, base_trainer)\n trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)\n models = trainer.get_model()\n dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')\n oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)\n target = trainer.get_cv_targets()\n metric = accuracy_score(target, oof)\n return trainer, metric\n\n\ndef eval_single_model(trainer, data, target):\n pred = np.array(trainer.predict(data)).T\n ensembler = SimpleAgerageEnsember()\n pred_avg = ensembler.predict(pred)\n pred_avg = np.where(pred_avg > 0.5, 1, 0)\n score = accuracy_score(target, pred_avg)\n return score\n\n\ndef save_importance(model_name, trainer):\n name, mean_importance, std_importance = trainer.get_importance()\n fig = plot_importance(name, mean_importance, std_importance)\n fig.savefig(f'../data/titanic/working/{model_name}_importance.png')\n\n\ndef main():\n src_dir = '../data/titanic/feature/'\n feature_files = config.FeatureList.features\n feature_files = [os.path.join(src_dir, f'{filename}.pkl') for filename in\n feature_files]\n X = load_feature(feature_files)\n y = load_pickle(os.path.join(src_dir, 'target.pkl'))\n print(X.head())\n print(y.head())\n X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2,\n random_state=config.SEED, stratify=y)\n cv_metrics = {}\n eval_metrics = {}\n base_trainers = {'LGBM': LGBMTrainer(config.LightgbmParams.params,\n config.LightgbmParams.train_params)}\n fit_trainers = {}\n for model_name, base_trainer in base_trainers.items():\n trainer, metric = run_train(model_name, base_trainer, X_train, y_train)\n fit_trainers[model_name] = trainer\n cv_metrics[model_name] = metric\n eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)\n save_importance(model_name, trainer)\n pred_first = []\n for model_name, _trainer in fit_trainers.items():\n pred_first.append(np.array(_trainer.predict(X_eval)).T)\n pred_first = np.concatenate(pred_first, axis=1)\n pred_first = pd.DataFrame(pred_first)\n base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config\n .SEED))\n trainer, metric = run_train('stack_ridge', base_trainer, pred_first, y_eval\n )\n eval_metrics['Stack'] = metric\n for model_name, metric in cv_metrics.items():\n print(f'{model_name:>8} CV Metric: {metric:.08f}')\n for model_name, metric in eval_metrics.items():\n print(f'{model_name:>8} Eval Metric: {metric:.08f}')\n if config.DEBUG is not True:\n writer = MlflowWriter(config.MLflowConfig.experiment_name,\n tracking_uri=os.path.abspath('../mlruns'))\n writer.set_run_name(config.MLflowConfig.run_name)\n writer.set_note_content(config.MLflowConfig.experiment_note)\n writer.log_param('Feature', ', '.join(config.FeatureList.features))\n writer.log_param('SEED', config.SEED)\n writer.log_param('NUM_SEED', config.NUM_SEED)\n writer.log_param('LGBM_params', {'params': config.LightgbmParams.\n params, 'train_params': config.LightgbmParams.train_params})\n for model_name, _metric in cv_metrics.items():\n writer.log_metric(f'{model_name} CV Metric', _metric)\n for model_name, _metric in eval_metrics.items():\n writer.log_metric(f'{model_name} Eval Metric', _metric)\n writer.set_terminated()\n\n\nif __name__ == '__main__':\n with timer('Train Processing'):\n main()\n",
"step-4": "import os\nimport sys\nsys.path.append('..')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn.metrics import accuracy_score\nimport config\nfrom mikasa.common import timer\nfrom mikasa.io import load_pickle, dump_pickle, load_feature\nfrom mikasa.trainer.gbdt import LGBMTrainer\nfrom mikasa.trainer.base import SklearnRegressionTrainer\nfrom mikasa.trainer.cross_validation import RSACVTrainer\nfrom mikasa.ensemble import SimpleAgerageEnsember\nfrom mikasa.plot import plot_importance\nfrom mikasa.mlflow_writer import MlflowWriter\n\n\ndef run_train(model_name, base_trainer, X, y):\n cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)\n trainer = RSACVTrainer(cv, base_trainer)\n trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)\n models = trainer.get_model()\n dump_pickle(models, f'../data/titanic/model/{model_name}_models.pkl')\n oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)\n target = trainer.get_cv_targets()\n metric = accuracy_score(target, oof)\n return trainer, metric\n\n\ndef eval_single_model(trainer, data, target):\n pred = np.array(trainer.predict(data)).T\n ensembler = SimpleAgerageEnsember()\n pred_avg = ensembler.predict(pred)\n pred_avg = np.where(pred_avg > 0.5, 1, 0)\n score = accuracy_score(target, pred_avg)\n return score\n\n\ndef save_importance(model_name, trainer):\n name, mean_importance, std_importance = trainer.get_importance()\n fig = plot_importance(name, mean_importance, std_importance)\n fig.savefig(f'../data/titanic/working/{model_name}_importance.png')\n\n\ndef main():\n src_dir = '../data/titanic/feature/'\n feature_files = config.FeatureList.features\n feature_files = [os.path.join(src_dir, f'{filename}.pkl') for filename in\n feature_files]\n X = load_feature(feature_files)\n y = load_pickle(os.path.join(src_dir, 'target.pkl'))\n print(X.head())\n print(y.head())\n X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.2,\n random_state=config.SEED, stratify=y)\n cv_metrics = {}\n eval_metrics = {}\n base_trainers = {'LGBM': LGBMTrainer(config.LightgbmParams.params,\n config.LightgbmParams.train_params)}\n fit_trainers = {}\n for model_name, base_trainer in base_trainers.items():\n trainer, metric = run_train(model_name, base_trainer, X_train, y_train)\n fit_trainers[model_name] = trainer\n cv_metrics[model_name] = metric\n eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)\n save_importance(model_name, trainer)\n pred_first = []\n for model_name, _trainer in fit_trainers.items():\n pred_first.append(np.array(_trainer.predict(X_eval)).T)\n pred_first = np.concatenate(pred_first, axis=1)\n pred_first = pd.DataFrame(pred_first)\n base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config\n .SEED))\n trainer, metric = run_train('stack_ridge', base_trainer, pred_first, y_eval\n )\n eval_metrics['Stack'] = metric\n for model_name, metric in cv_metrics.items():\n print(f'{model_name:>8} CV Metric: {metric:.08f}')\n for model_name, metric in eval_metrics.items():\n print(f'{model_name:>8} Eval Metric: {metric:.08f}')\n if config.DEBUG is not True:\n writer = MlflowWriter(config.MLflowConfig.experiment_name,\n tracking_uri=os.path.abspath('../mlruns'))\n writer.set_run_name(config.MLflowConfig.run_name)\n writer.set_note_content(config.MLflowConfig.experiment_note)\n writer.log_param('Feature', ', 
'.join(config.FeatureList.features))\n writer.log_param('SEED', config.SEED)\n writer.log_param('NUM_SEED', config.NUM_SEED)\n writer.log_param('LGBM_params', {'params': config.LightgbmParams.\n params, 'train_params': config.LightgbmParams.train_params})\n for model_name, _metric in cv_metrics.items():\n writer.log_metric(f'{model_name} CV Metric', _metric)\n for model_name, _metric in eval_metrics.items():\n writer.log_metric(f'{model_name} Eval Metric', _metric)\n writer.set_terminated()\n\n\nif __name__ == '__main__':\n with timer('Train Processing'):\n main()\n",
"step-5": "import os\nimport sys\n\nsys.path.append(\"..\")\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn.metrics import accuracy_score\n\nimport config\nfrom mikasa.common import timer\nfrom mikasa.io import load_pickle, dump_pickle, load_feature\n\nfrom mikasa.trainer.gbdt import LGBMTrainer\nfrom mikasa.trainer.base import SklearnRegressionTrainer\nfrom mikasa.trainer.cross_validation import RSACVTrainer\nfrom mikasa.ensemble import SimpleAgerageEnsember\n\nfrom mikasa.plot import plot_importance\nfrom mikasa.mlflow_writer import MlflowWriter\n\n\ndef run_train(model_name, base_trainer, X, y):\n cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)\n trainer = RSACVTrainer(cv, base_trainer)\n trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)\n # Save model.\n models = trainer.get_model()\n dump_pickle(models, f\"../data/titanic/model/{model_name}_models.pkl\")\n # Evaluation by cv.\n oof = np.where(trainer.get_cv_oof() > 0.5, 1, 0)\n target = trainer.get_cv_targets()\n metric = accuracy_score(target, oof)\n return trainer, metric\n\n\ndef eval_single_model(trainer, data, target):\n pred = np.array(trainer.predict(data)).T\n\n ensembler = SimpleAgerageEnsember()\n pred_avg = ensembler.predict(pred)\n pred_avg = np.where(pred_avg > 0.5, 1, 0)\n score = accuracy_score(target, pred_avg)\n return score\n\n\ndef save_importance(model_name, trainer):\n name, mean_importance, std_importance = trainer.get_importance()\n fig = plot_importance(name, mean_importance, std_importance)\n fig.savefig(f\"../data/titanic/working/{model_name}_importance.png\")\n\n\ndef main():\n # Load data.\n src_dir = \"../data/titanic/feature/\"\n feature_files = config.FeatureList.features\n feature_files = [\n os.path.join(src_dir, f\"{filename}.pkl\") for filename in feature_files\n ]\n X = load_feature(feature_files)\n y = load_pickle(os.path.join(src_dir, \"target.pkl\"))\n print(X.head())\n print(y.head())\n\n # Split data\n X_train, X_eval, y_train, y_eval = train_test_split(\n X, y, test_size=0.2, random_state=config.SEED, stratify=y\n )\n\n cv_metrics = {}\n eval_metrics = {}\n\n # Train model.\n base_trainers = {\n \"LGBM\": LGBMTrainer(\n config.LightgbmParams.params, config.LightgbmParams.train_params\n ),\n }\n fit_trainers = {}\n for model_name, base_trainer in base_trainers.items():\n trainer, metric = run_train(model_name, base_trainer, X_train, y_train)\n\n fit_trainers[model_name] = trainer\n cv_metrics[model_name] = metric\n eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)\n save_importance(model_name, trainer)\n\n # Stacking\n pred_first = []\n for model_name, _trainer in fit_trainers.items():\n pred_first.append(np.array(_trainer.predict(X_eval)).T)\n pred_first = np.concatenate(pred_first, axis=1)\n pred_first = pd.DataFrame(pred_first)\n\n base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config.SEED))\n trainer, metric = run_train(\"stack_ridge\", base_trainer, pred_first, y_eval)\n eval_metrics[\"Stack\"] = metric\n\n # Evaluation\n for model_name, metric in cv_metrics.items():\n print(f\"{model_name:>8} CV Metric: {metric:.08f}\")\n for model_name, metric in eval_metrics.items():\n print(f\"{model_name:>8} Eval Metric: {metric:.08f}\")\n\n # Domp logs to mlflow.\n if config.DEBUG is not True:\n writer = MlflowWriter(\n config.MLflowConfig.experiment_name,\n 
tracking_uri=os.path.abspath(\"../mlruns\"),\n )\n writer.set_run_name(config.MLflowConfig.run_name)\n writer.set_note_content(config.MLflowConfig.experiment_note)\n # Features\n writer.log_param(\"Feature\", \", \".join(config.FeatureList.features))\n # Paraeters\n writer.log_param(\"SEED\", config.SEED)\n writer.log_param(\"NUM_SEED\", config.NUM_SEED)\n writer.log_param(\n \"LGBM_params\",\n {\n \"params\": config.LightgbmParams.params,\n \"train_params\": config.LightgbmParams.train_params,\n },\n )\n # Metric\n for model_name, _metric in cv_metrics.items():\n writer.log_metric(f\"{model_name} CV Metric\", _metric)\n for model_name, _metric in eval_metrics.items():\n writer.log_metric(f\"{model_name} Eval Metric\", _metric)\n # Close writer client.\n writer.set_terminated()\n\n\nif __name__ == \"__main__\":\n with timer(\"Train Processing\"):\n main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
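The row's pipeline is classic two-level stacking: cross-validated base models produce out-of-fold predictions, and a Ridge meta-learner is fit on top of them. A compact sklearn-only sketch of the same idea on synthetic data; mikasa's trainer classes are not assumed, and fitting and scoring the meta-model on the same out-of-fold column is purely illustrative:

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_predict

X, y = make_classification(n_samples=400, random_state=0)
base = GradientBoostingClassifier(random_state=0)
# Out-of-fold probabilities play the role of trainer.get_cv_oof() in the row.
oof = cross_val_predict(base, X, y, cv=3, method='predict_proba')[:, 1]
meta = Ridge().fit(oof.reshape(-1, 1), y)
stacked = (meta.predict(oof.reshape(-1, 1)) > 0.5).astype(int)
print('stacked accuracy:', (stacked == y).mean())
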
<|reserved_special_token_0|>
class SudukoBoard:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Cell:
def __init__(self, board, row, col):
self._values = [None] * SudukoBoard.sz
self._value = None
self.sets = []
self.row = row
self.col = col
self.open = SudukoBoard.sz
self.board = board
def add_set(self, set):
self.sets.append(set)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self._value is not None and self._value != value:
raise ValueError('Conflicting value for cell', self.row,
self.col, self._value, value)
if self._value != value:
self._value = value
self._values = [False] * SudukoBoard.sz
self._values[value - 1] = True
self.open = 0
self.board.open -= 1
for s in self.sets:
for c in s.entries:
if c != self:
c.cantbe(value)
def cantbe(self, value):
if self._values[value - 1] == True:
raise ValueError('Conflicting cant be for cell, already set',
self.row, self.col, self._value, value)
if self._values[value - 1] != False:
self._values[value - 1] = False
self.open -= 1
cnt = 0
nidx = None
for idx, v in enumerate(self._values):
if v is None:
cnt += 1
nidx = idx
if cnt == 1:
self.value = nidx + 1
def couldbe(self, value):
return self._values[value - 1]
def couldbelist(self):
return [(idx + 1) for idx, x in enumerate(self._values) if x is
None]
class Set:
def __init__(self):
self.entries = []
def add_cell(self, cell):
self.entries.append(cell)
cell.add_set(self)
def update(self, entry):
value = entry.value
for other in self.entries:
if other == entry:
continue
if other.value == value:
raise Exception('Illegal value')
else:
other.value = not value
def __init__(self):
self.initial = 0
self.open = SudukoBoard.sz ** 2
self.cells = []
self.rows = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.blks = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
s3 = SudukoBoard.side * SudukoBoard.sz
for i in range(SudukoBoard.sz ** 2):
cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %
SudukoBoard.sz)
self.cells.append(cell)
for cell in self.cells:
self.rows[cell.row].add_cell(cell)
self.cols[cell.col].add_cell(cell)
self.blks[cell.row // SudukoBoard.side + cell.col //
SudukoBoard.side * SudukoBoard.side].add_cell(cell)
def setup(self, txt):
trows = txt.split(',')
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of rows')
cnt = 0
for ridx, trow in enumerate(trows):
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of columns row ', ridx)
for cidx, c in enumerate(trow):
if c != '.':
v = int(c)
cnt += 1
self.set(ridx, cidx, v)
def set(self, row, col, value):
self.rows[row].entries[col].value = value
<|reserved_special_token_0|>
def solve(self, depth=0, guesses=[]):
for i in range(1000):
print('Iteration ', depth, i)
            open = Counter([len(c.couldbelist()) for c in self.cells])  # histogram of candidate-list sizes
print('open cells', open)
for c in self.cells:
if c.open != 1:
continue
if c.open != len(c.couldbelist()):
pass
                value = c.couldbelist()[0]
                c.value = value  # Cell defines no set(); assign through the value property
        if self.open > 0 and 1 not in open:
print('We have to guess depth {} and {} cells open'.format(
depth, self.open))
bestguess = []
for c in self.cells:
for guess in c.couldbelist():
other = deepcopy(self)
try:
other.set(c.row, c.col, guess)
bestguess.append((other.open, (c.row, c.col,
guess)))
except ValueError as e:
pass
except Exception as e:
print_exc()
for open, (row, col, guess) in sorted(bestguess):
print('Best guess ', row, col, guess, depth)
other = deepcopy(self)
other.set(row, col, guess)
soln, soln_guesses = other.solve(depth + 1, guesses + [
(row, col, guess)])
if soln.open == 0:
print('guess return')
return soln, soln_guesses
return self, guesses
def leftopen(self):
cnt = 0
for c in self.cells:
if c.value is None:
cnt += 1
        assert cnt == self.open, 'BAD'
return cnt
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SudukoBoard:
side = 3
sz = side * side
class Cell:
def __init__(self, board, row, col):
self._values = [None] * SudukoBoard.sz
self._value = None
self.sets = []
self.row = row
self.col = col
self.open = SudukoBoard.sz
self.board = board
def add_set(self, set):
self.sets.append(set)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self._value is not None and self._value != value:
raise ValueError('Conflicting value for cell', self.row,
self.col, self._value, value)
if self._value != value:
self._value = value
self._values = [False] * SudukoBoard.sz
self._values[value - 1] = True
self.open = 0
self.board.open -= 1
for s in self.sets:
for c in s.entries:
if c != self:
c.cantbe(value)
def cantbe(self, value):
if self._values[value - 1] == True:
raise ValueError('Conflicting cant be for cell, already set',
self.row, self.col, self._value, value)
if self._values[value - 1] != False:
self._values[value - 1] = False
self.open -= 1
cnt = 0
nidx = None
for idx, v in enumerate(self._values):
if v is None:
cnt += 1
nidx = idx
if cnt == 1:
self.value = nidx + 1
def couldbe(self, value):
return self._values[value - 1]
def couldbelist(self):
return [(idx + 1) for idx, x in enumerate(self._values) if x is
None]
class Set:
def __init__(self):
self.entries = []
def add_cell(self, cell):
self.entries.append(cell)
cell.add_set(self)
def update(self, entry):
value = entry.value
for other in self.entries:
if other == entry:
continue
if other.value == value:
raise Exception('Illegal value')
else:
other.value = not value
def __init__(self):
self.initial = 0
self.open = SudukoBoard.sz ** 2
self.cells = []
self.rows = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.blks = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
s3 = SudukoBoard.side * SudukoBoard.sz
for i in range(SudukoBoard.sz ** 2):
cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %
SudukoBoard.sz)
self.cells.append(cell)
for cell in self.cells:
self.rows[cell.row].add_cell(cell)
self.cols[cell.col].add_cell(cell)
self.blks[cell.row // SudukoBoard.side + cell.col //
SudukoBoard.side * SudukoBoard.side].add_cell(cell)
def setup(self, txt):
trows = txt.split(',')
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of rows')
cnt = 0
for ridx, trow in enumerate(trows):
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of columns row ', ridx)
for cidx, c in enumerate(trow):
if c != '.':
v = int(c)
cnt += 1
self.set(ridx, cidx, v)
def set(self, row, col, value):
self.rows[row].entries[col].value = value
def print(self):
for ridx, r in enumerate(self.rows):
for cidx, c in enumerate(r.entries):
print('.' if c.value is None else c.value, end='')
if (cidx + 1) % SudukoBoard.side == 0:
print('|', end='')
print()
if (ridx + 1) % SudukoBoard.side == 0:
print('{}'.format('-' * (SudukoBoard.sz + SudukoBoard.side)))
def solve(self, depth=0, guesses=[]):
for i in range(1000):
print('Iteration ', depth, i)
            open = Counter([len(c.couldbelist()) for c in self.cells])  # histogram of candidate-list sizes
print('open cells', open)
for c in self.cells:
if c.open != 1:
continue
if c.open != len(c.couldbelist()):
pass
                value = c.couldbelist()[0]
                c.value = value  # Cell defines no set(); assign through the value property
        if self.open > 0 and 1 not in open:
print('We have to guess depth {} and {} cells open'.format(
depth, self.open))
bestguess = []
for c in self.cells:
for guess in c.couldbelist():
other = deepcopy(self)
try:
other.set(c.row, c.col, guess)
bestguess.append((other.open, (c.row, c.col,
guess)))
except ValueError as e:
pass
except Exception as e:
print_exc()
for open, (row, col, guess) in sorted(bestguess):
print('Best guess ', row, col, guess, depth)
other = deepcopy(self)
other.set(row, col, guess)
soln, soln_guesses = other.solve(depth + 1, guesses + [
(row, col, guess)])
if soln.open == 0:
print('guess return')
return soln, soln_guesses
return self, guesses
def leftopen(self):
cnt = 0
for c in self.cells:
if c.value is None:
cnt += 1
        assert cnt == self.open, 'BAD'
return cnt
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SudukoBoard:
side = 3
sz = side * side
class Cell:
def __init__(self, board, row, col):
self._values = [None] * SudukoBoard.sz
self._value = None
self.sets = []
self.row = row
self.col = col
self.open = SudukoBoard.sz
self.board = board
def add_set(self, set):
self.sets.append(set)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self._value is not None and self._value != value:
raise ValueError('Conflicting value for cell', self.row,
self.col, self._value, value)
if self._value != value:
self._value = value
self._values = [False] * SudukoBoard.sz
self._values[value - 1] = True
self.open = 0
self.board.open -= 1
for s in self.sets:
for c in s.entries:
if c != self:
c.cantbe(value)
def cantbe(self, value):
if self._values[value - 1] == True:
raise ValueError('Conflicting cant be for cell, already set',
self.row, self.col, self._value, value)
if self._values[value - 1] != False:
self._values[value - 1] = False
self.open -= 1
cnt = 0
nidx = None
for idx, v in enumerate(self._values):
if v is None:
cnt += 1
nidx = idx
if cnt == 1:
self.value = nidx + 1
def couldbe(self, value):
return self._values[value - 1]
def couldbelist(self):
return [(idx + 1) for idx, x in enumerate(self._values) if x is
None]
class Set:
def __init__(self):
self.entries = []
def add_cell(self, cell):
self.entries.append(cell)
cell.add_set(self)
def update(self, entry):
value = entry.value
for other in self.entries:
if other == entry:
continue
if other.value == value:
raise Exception('Illegal value')
else:
other.value = not value
def __init__(self):
self.initial = 0
self.open = SudukoBoard.sz ** 2
self.cells = []
self.rows = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.blks = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
s3 = SudukoBoard.side * SudukoBoard.sz
for i in range(SudukoBoard.sz ** 2):
cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %
SudukoBoard.sz)
self.cells.append(cell)
for cell in self.cells:
self.rows[cell.row].add_cell(cell)
self.cols[cell.col].add_cell(cell)
self.blks[cell.row // SudukoBoard.side + cell.col //
SudukoBoard.side * SudukoBoard.side].add_cell(cell)
def setup(self, txt):
trows = txt.split(',')
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of rows')
cnt = 0
for ridx, trow in enumerate(trows):
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of columns row ', ridx)
for cidx, c in enumerate(trow):
if c != '.':
v = int(c)
cnt += 1
self.set(ridx, cidx, v)
def set(self, row, col, value):
self.rows[row].entries[col].value = value
def print(self):
for ridx, r in enumerate(self.rows):
for cidx, c in enumerate(r.entries):
print('.' if c.value is None else c.value, end='')
if (cidx + 1) % SudukoBoard.side == 0:
print('|', end='')
print()
if (ridx + 1) % SudukoBoard.side == 0:
print('{}'.format('-' * (SudukoBoard.sz + SudukoBoard.side)))
def solve(self, depth=0, guesses=[]):
for i in range(1000):
print('Iteration ', depth, i)
            open = Counter([len(c.couldbelist()) for c in self.cells])  # histogram of candidate-list sizes
print('open cells', open)
for c in self.cells:
if c.open != 1:
continue
if c.open != len(c.couldbelist()):
pass
                value = c.couldbelist()[0]
                c.value = value  # Cell defines no set(); assign through the value property
        if self.open > 0 and 1 not in open:
print('We have to guess depth {} and {} cells open'.format(
depth, self.open))
bestguess = []
for c in self.cells:
for guess in c.couldbelist():
other = deepcopy(self)
try:
other.set(c.row, c.col, guess)
bestguess.append((other.open, (c.row, c.col,
guess)))
except ValueError as e:
pass
except Exception as e:
print_exc()
for open, (row, col, guess) in sorted(bestguess):
print('Best guess ', row, col, guess, depth)
other = deepcopy(self)
other.set(row, col, guess)
soln, soln_guesses = other.solve(depth + 1, guesses + [
(row, col, guess)])
if soln.open == 0:
print('guess return')
return soln, soln_guesses
return self, guesses
def leftopen(self):
cnt = 0
for c in self.cells:
if c.value is None:
cnt += 1
        assert cnt == self.open, 'BAD'
return cnt
if __name__ == '__main__':
board = SudukoBoard()
evil = (
'..1.4..6.,...8...2.,..4..9.3.,.48..76..,5.......9,..25..47.,.8.1..2..,.5...6...,.6..9.1..'
)
evil2 = (
'..9..3.14,....96...,.2....9..,..8.....1,..12784..,6.....7..,..7....4.,...93....,46.8..3..'
)
medium = (
'8.4.7.6.5,....8237.,7......1.,35...8...,....9....,...4...61,.3......7,.9571....,4.6.3.1.2'
)
hard = (
'......1..,7..4.18..,..375..4.,4.1.7....,.9..8..7.,....9.6.5,.6..129..,..45.6..2,..2......'
)
easy = (
'.7.4..2..,2..5791..,.4......6,..261.35.,631...427,.54.328..,5......3.,..6157..4,..8..6.1.'
)
board.setup(evil2)
board.print()
print()
soln, guesses = board.solve()
print('Final : guesses', guesses)
soln.print()
pass
<|reserved_special_token_1|>
from collections import Counter
from copy import deepcopy
from itertools import count
from traceback import print_exc
class SudukoBoard:
side = 3
sz = side * side
class Cell:
def __init__(self, board, row, col):
self._values = [None] * SudukoBoard.sz
self._value = None
self.sets = []
self.row = row
self.col = col
self.open = SudukoBoard.sz
self.board = board
def add_set(self, set):
self.sets.append(set)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self._value is not None and self._value != value:
raise ValueError('Conflicting value for cell', self.row,
self.col, self._value, value)
if self._value != value:
self._value = value
self._values = [False] * SudukoBoard.sz
self._values[value - 1] = True
self.open = 0
self.board.open -= 1
for s in self.sets:
for c in s.entries:
if c != self:
c.cantbe(value)
def cantbe(self, value):
if self._values[value - 1] == True:
raise ValueError('Conflicting cant be for cell, already set',
self.row, self.col, self._value, value)
if self._values[value - 1] != False:
self._values[value - 1] = False
self.open -= 1
cnt = 0
nidx = None
for idx, v in enumerate(self._values):
if v is None:
cnt += 1
nidx = idx
if cnt == 1:
self.value = nidx + 1
def couldbe(self, value):
return self._values[value - 1]
def couldbelist(self):
return [(idx + 1) for idx, x in enumerate(self._values) if x is
None]
class Set:
def __init__(self):
self.entries = []
def add_cell(self, cell):
self.entries.append(cell)
cell.add_set(self)
def update(self, entry):
value = entry.value
for other in self.entries:
if other == entry:
continue
if other.value == value:
raise Exception('Illegal value')
else:
other.value = not value
def __init__(self):
self.initial = 0
self.open = SudukoBoard.sz ** 2
self.cells = []
self.rows = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.cols = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
self.blks = [SudukoBoard.Set() for i in range(SudukoBoard.sz)]
s3 = SudukoBoard.side * SudukoBoard.sz
for i in range(SudukoBoard.sz ** 2):
cell = SudukoBoard.Cell(self, i // SudukoBoard.sz, i %
SudukoBoard.sz)
self.cells.append(cell)
for cell in self.cells:
self.rows[cell.row].add_cell(cell)
self.cols[cell.col].add_cell(cell)
self.blks[cell.row // SudukoBoard.side + cell.col //
SudukoBoard.side * SudukoBoard.side].add_cell(cell)
def setup(self, txt):
trows = txt.split(',')
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of rows')
cnt = 0
for ridx, trow in enumerate(trows):
if len(trows) != SudukoBoard.sz:
raise Exception('Incorrect number of columns row ', ridx)
for cidx, c in enumerate(trow):
if c != '.':
v = int(c)
cnt += 1
self.set(ridx, cidx, v)
def set(self, row, col, value):
self.rows[row].entries[col].value = value
def print(self):
for ridx, r in enumerate(self.rows):
for cidx, c in enumerate(r.entries):
print('.' if c.value is None else c.value, end='')
if (cidx + 1) % SudukoBoard.side == 0:
print('|', end='')
print()
if (ridx + 1) % SudukoBoard.side == 0:
print('{}'.format('-' * (SudukoBoard.sz + SudukoBoard.side)))
def solve(self, depth=0, guesses=[]):
for i in range(1000):
print('Iteration ', depth, i)
            open = Counter([len(c.couldbelist()) for c in self.cells])  # histogram of candidate-list sizes
print('open cells', open)
for c in self.cells:
if c.open != 1:
continue
if c.open != len(c.couldbelist()):
pass
                value = c.couldbelist()[0]
                c.value = value  # Cell defines no set(); assign through the value property
        if self.open > 0 and 1 not in open:
print('We have to guess depth {} and {} cells open'.format(
depth, self.open))
bestguess = []
for c in self.cells:
for guess in c.couldbelist():
other = deepcopy(self)
try:
other.set(c.row, c.col, guess)
bestguess.append((other.open, (c.row, c.col,
guess)))
except ValueError as e:
pass
except Exception as e:
print_exc()
for open, (row, col, guess) in sorted(bestguess):
print('Best guess ', row, col, guess, depth)
other = deepcopy(self)
other.set(row, col, guess)
soln, soln_guesses = other.solve(depth + 1, guesses + [
(row, col, guess)])
if soln.open == 0:
print('guess return')
return soln, soln_guesses
return self, guesses
def leftopen(self):
cnt = 0
for c in self.cells:
if c.value is None:
cnt += 1
        assert cnt == self.open, 'BAD'
return cnt
if __name__ == '__main__':
board = SudukoBoard()
evil = (
'..1.4..6.,...8...2.,..4..9.3.,.48..76..,5.......9,..25..47.,.8.1..2..,.5...6...,.6..9.1..'
)
evil2 = (
'..9..3.14,....96...,.2....9..,..8.....1,..12784..,6.....7..,..7....4.,...93....,46.8..3..'
)
medium = (
'8.4.7.6.5,....8237.,7......1.,35...8...,....9....,...4...61,.3......7,.9571....,4.6.3.1.2'
)
hard = (
'......1..,7..4.18..,..375..4.,4.1.7....,.9..8..7.,....9.6.5,.6..129..,..45.6..2,..2......'
)
easy = (
'.7.4..2..,2..5791..,.4......6,..261.35.,631...427,.54.328..,5......3.,..6157..4,..8..6.1.'
)
board.setup(evil2)
board.print()
print()
soln, guesses = board.solve()
print('Final : guesses', guesses)
soln.print()
pass
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
import time
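# Hover through a nested dropdown menu on one page, then double-click a button on another.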
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.implicitly_wait(10)
driver.maximize_window()
driver.get('http://demo.automationtesting.in/Register.html')
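# Locate each level of the nested "Interactions" dropdown menu.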
interactions = driver.find_element_by_xpath(
"//a[@class='dropdown-toggle' and contains(text(),'Interactions ')]")
drag = driver.find_element_by_xpath(
"//a[@class='dropdown-toggle' and contains(text(),'Drag and Drop ')]")
static = driver.find_element_by_xpath(
"//ul[@class='childmenu ']//a[contains(text(),'Static ')]")
actions = ActionChains(driver)
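# Hover Interactions -> Drag and Drop, then click the Static entry.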
actions.move_to_element(interactions).move_to_element(drag).move_to_element(
static).click().perform()
time.sleep(5)
driver.get('http://testautomationpractice.blogspot.com/')
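# Double-click target: the button inside the page's HTML10 widget.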
ele = driver.find_element_by_xpath("//*[@id='HTML10']/div[1]/button")
actions = ActionChains(driver)  # start a fresh chain; perform() does not clear previously queued actions
actions.double_click(ele).perform()
time.sleep(5)
driver.close()
import os
import pickle
from matplotlib import pyplot as plt
cwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',
'CrypOpt_RiskNeutralDensity')
data_path = os.path.join(cwd, 'data') + '/'
day = '2020-03-11'
res = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))
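# Assumed pickle layout: res maps each maturity tau to a dict holding the
# observed quotes ('df'), the fitted smile and its derivatives ('M', 'smile',
# 'first', 'second'), and the density ('K', 'S', 'q').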
# ---------------------------------------------------------------------- SMILES
fig1, axes = plt.subplots(2,4, figsize=(10,7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')
ax.plot(res[key]['M'], res[key]['smile'])
ax.text(0.99, 0.99, r'$\tau$ = ' + str(key),
horizontalalignment='right',
verticalalignment='top',
transform=ax.transAxes)
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)
# ------------------------------------------------------------------------ RNDs
fig2, axes = plt.subplots(2,4, figsize=(10,7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['K'][::-1], res[key]['q'])
ax.text(0.99, 0.99, r'$\tau$ = ' + str(key),
horizontalalignment='right',
verticalalignment='top',
transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('risk neutral density')
axes.flatten()[4].set_ylabel('risk neutral density')
axes.flatten()[4].set_xlabel('spot price')
axes.flatten()[5].set_xlabel('spot price')
axes.flatten()[6].set_xlabel('spot price')
axes.flatten()[7].set_xlabel('spot price')
plt.tight_layout()
fig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)
# ----------------------------------------------------------------- DERIVATIVES
fig3, axes = plt.subplots(2,4, figsize=(10,7))
for key, ax in zip(sorted(res), axes.flatten()):
print(key, ax)
ax.plot(res[key]['M'], res[key]['smile'])
ax.plot(res[key]['M'], res[key]['first'])
ax.plot(res[key]['M'], res[key]['second'])
ax.text(0.99, 0.01, r'$\tau$ = ' + str(key),
horizontalalignment='right',
verticalalignment='bottom',
transform=ax.transAxes)
ax.set_yticks([])
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)), transparent=True)
# ----------------------------------------------------------------- TAU PROCESS
for key in res:
s = res[key]
fig4, axes = plt.subplots(1,3, figsize=(10,4))
ax = axes[0]
ax.plot(s['df'].M, s['df'].iv, '.', c='r')
ax.plot(s['M'], s['smile'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[1]
ax.plot(s['M'], s['smile'])
ax.plot(s['M'], s['first'])
ax.plot(s['M'], s['second'])
ax.set_xlabel('moneyness')
ax.set_ylabel('implied volatility')
ax = axes[2]
ax.plot(s['S'], s['q'])
ax.set_xlabel('spot price')
ax.set_ylabel(r'risk neutral density')
ax.set_yticks([])
plt.tight_layout()
fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)), transparent=True)
#! /usr/bin/python3
import pprint
import tkinter as tk
from tkinter import messagebox
from PIL import Image
from tkinter import *
from prettytable import PrettyTable
from tkinter import ttk
import os
import subprocess
import mysql.connector
from datetime import datetime
import time
db=mysql.connector.connect(host='localhost',user='root',passwd='PASSWORD',database='DATABASENAME')
cur=db.cursor()
root=Tk()
root.title("WELCOME TO AGRI MARKET")
# Stored procedure used by history(); create it once in MySQL before running:
"""
DELIMITER $$
CREATE PROCEDURE getMonth(
IN month VARCHAR(2))
BEGIN
SELECT * FROM payment
WHERE p_date LIKE CONCAT('____-',month,'%');
END$$
DELIMITER ;
"""
T1,T2,T3=0,0,0
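# Login screen: username/password entries, a LogIn button and a Register button.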
def First_page(root):
global T1,T2,T3
frame=Frame(root,height=500,width=800,bg='ivory')
frame.pack()
label=Label(root,text='WELCOME TO AGRI MARKET',font=('Times new roman',25))
label.place(x=200,y=50)
button=Button(root,text='LogIn',font=('times new roman',20),command=check_pass,bg='green')
button.place(x=350,y=350)
L1 = tk.Label(root, text="Username", font=("Arial Bold", 15), bg='ivory')
L1.place(x=150, y=200)
T1 = tk.Entry(root, width = 30, bd = 5)
T1.place(x=280, y=200)
L2 = tk.Label(root, text="Password", font=("Arial Bold", 15), bg='ivory')
L2.place(x=150, y=250)
T2 = tk.Entry(root, width = 30, show='*', bd = 5)
T2.place(x=280, y=250)
reg_button=Button(root,text='Register',font=("Arial Bold",15),bg='blue',command=create_pass)
reg_button.place(x=340,y=400)
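# Validate the entered credentials against password.txt (one 'username=password' per line).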
def check_pass():
global root,T1,T2,T3
try:
with open('password.txt','r')as f:
lines=f.read()
        if T1.get()!='' and T2.get()!='' and T1.get()+'='+T2.get() in lines.splitlines():
entity_page()
else:
label=Label(root,text='Invalid username or password.Try again',font=('times new roman',15))
label.place(x=200,y=100)
except:
label=Label(root,text='Invalid username or password.Try again',font=('times new roman',15))
label.place(x=200,y=100)
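# Registration screen: collect a new username plus matching passwords.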
def create_pass():
global root,T1,T2,T3
#to clean up previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='ivory')
label.place(x=0,y=0)
#this window
L1 = tk.Label(root, text="Username", font=("Arial Bold", 15), bg='ivory')
L1.place(x=150, y=200)
T1 = tk.Entry(root, width = 30, bd = 5)
T1.place(x=380, y=200)
L2 = tk.Label(root, text="Password", font=("Arial Bold", 15), bg='ivory')
L2.place(x=150, y=250)
T2 = tk.Entry(root, width = 30, show='*', bd = 5)
T2.place(x=380, y=250)
L2 = tk.Label(root, text="Confirm Password", font=("Arial Bold", 15), bg='ivory')
L2.place(x=150, y=300)
T3 = tk.Entry(root, width = 30, show='*', bd = 5)
T3.place(x=380, y=300)
reg_button=Button(root,text='Done',font=("Arial Bold",15),bg='blue',command=add_pass)
reg_button.place(x=440,y=400)
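# Append the new 'username=password' entry to password.txt, creating the file if needed.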
def add_pass():
global root,T1,T2,T3
if T2.get()!=T3.get():
        label=Label(root,text='Passwords do not match. Enter again',font=('times new roman',20))
label.place(x=100,y=100)
else:
try:
with open('password.txt','r')as f:
data=f.read()
with open('password.txt','w')as f:
f.write(data+'\n')
f.write(T1.get()+'='+T2.get())
entity_page()
except:
with open('password.txt','w')as f:
f.write(T1.get()+'='+T2.get())
entity_page()
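# Main menu: one button per entity table plus the monthly booking history.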
def entity_page():
global root
#cleaning previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='ivory')
label.place(x=0,y=0)
#this window
label=Label(root,text='WELCOME TO AGRI MARKET ',font=('Times new roman',20),bg='blue')
label.place(x=200,y=20)
label=Label(root,text='Choose the Entity ',font=('Times new roman',20),bg='white')
label.place(x=250,y=100)
Button = tk.Button(root, text="Farmers", font=("Arial", 15),command=farmer)
Button.place(x=100, y=150+25)
Button = tk.Button(root, text="Company", font=("Arial", 15),command=company)
Button.place(x=300, y=150+25)
Button = tk.Button(root, text="Fertilizer", font=("Arial", 15),command=fertilizer)
Button.place(x=500, y=150+25)
Button = tk.Button(root, text="Order", font=("Arial", 15),command=orders)
Button.place(x=200, y=300+25)
Button = tk.Button(root, text="Payment", font=("Arial", 15),command=payment)
Button.place(x=400, y=300+25)
Button = tk.Button(root, text="GET BOOKING HISTORY", font=("Arial", 15),command=history)
Button.place(x=200, y=400+25)
# Booking history: list this month's payment transactions via the getMonth stored procedure.
def history():
global root,cur,db
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
cur.execute("CALL getMonth(%s);",[datetime.today().strftime("%m")])
data=cur.fetchall()
label=Label(root,text="The Transaction History of this month",font=("Arial",15))
label.place(x=200,y=20)
button=Button(root,text='BACK',command=entity_page)
button.place(x=20,y=20)
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("trans_id",'p_f_id','p_date','p_amount','p_method'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('trans_id',text="Transaction Id")
table.heading('p_f_id',text="Farmer Id")
table.heading('p_date',text="Payment Date")
table.heading('p_amount',text="Amount")
table.heading('p_method',text="Payment Method")
#table.heading('f_address',text="Farmer Address")
table['show']='headings'
#table.column("f_id",width=100)
table.pack()
#cur.execute("SELECT * FROM payment;")
#data =cur.fetchall()
#db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
    #reconnect so the stored-procedure call above leaves no pending result
    #set on the connection used by the other pages
    db.close()
    db=mysql.connector.connect(host='localhost',user='root',passwd='bhushi',database='farmer_app')
    cur=db.cursor()
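# Illustrative sketch: the connection above hardcodes host/user/password.
# Reading them from environment variables keeps credentials out of the
# source; the names MYSQL_HOST/MYSQL_USER/MYSQL_PASS are assumptions and
# get_connection is not called by the existing code.
import os

def get_connection():
    return mysql.connector.connect(
        host=os.environ.get('MYSQL_HOST','localhost'),
        user=os.environ.get('MYSQL_USER','root'),
        passwd=os.environ.get('MYSQL_PASS',''),
        database='farmer_app')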
#farmer page
def farmer():
global root
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Table',font=('Times new roman',15),bg='white')
label.place(x=350,y=10)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=entity_page)
Button.place(x=10, y=50)
Button = tk.Button(root, text="Insert", font=("Arial", 15),command=insert_farmer)
Button.place(x=110, y=50)
Button = tk.Button(root, text="Delete", font=("Arial", 15),command=delete_farmer)
Button.place(x=210, y=50)
Button = tk.Button(root, text="Update", font=("Arial", 15),command=update_farmer)
Button.place(x=310, y=50)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_farmer)
Button.place(x=410, y=50)
view_farmer()
def view_farmer():
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("f_id",'f_name','f_phone','f_mail','f_locality','f_address'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('f_id',text="Farmer Id")
table.heading('f_name',text="Farmer Name")
table.heading('f_phone',text="Farmer Phone")
table.heading('f_mail',text="Farmer Mail")
table.heading('f_locality',text="Farmer Locality")
table.heading('f_address',text="Farmer Address")
table['show']='headings'
table.column("f_id",width=100)
table.pack()
cur.execute("SELECT * FROM farmer;")
data =cur.fetchall()
db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
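# Illustrative sketch: view_farmer/view_company/view_fer/view_ord/view_pay
# below all repeat the same frame + scrollbar + Treeview pattern. A generic
# version (hypothetical, not used by the existing code) shows the shared
# structure:
def view_table(columns, headings, query):
    frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
    frame.place(x=10,y=100,width=750,height=400)
    x_scroll=Scrollbar(frame,orient=HORIZONTAL)
    y_scroll=Scrollbar(frame,orient=VERTICAL)
    table=ttk.Treeview(frame,columns=columns,xscrollcommand=x_scroll.set,
                       yscrollcommand=y_scroll.set)
    x_scroll.pack(side=BOTTOM,fill=X)
    y_scroll.pack(side=RIGHT,fill=Y)
    x_scroll.config(command=table.xview)
    y_scroll.config(command=table.yview)
    for col,text in zip(columns,headings):
        table.heading(col,text=text)
    table['show']='headings'
    table.pack()
    cur.execute(query)
    for row in cur.fetchall():
        table.insert('',END,values=row)
# e.g. view_table(('c_id','c_name','c_address'),
#                 ('Company Id','Company Name','Company Address'),
#                 'SELECT * FROM company;')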
#module-level placeholders for the Entry widgets shared across the pages
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_farmer():
global e1,e2,e3,e4,e5,e6
#clean the window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#create the window
label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Farmer_name',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Farmer_phone',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Farmer_mail',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Farmer_locality',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
label=Label(root,text='Farmer_address',font=('Times new roman',20),bg='white')
label.place(x=50,y=270)
e1=Entry(root,width=50)
e2=Entry(root,width=50)
e3=Entry(root,width=50)
e4=Entry(root,width=50)
e5=Entry(root,width=50)
e6=Entry(root,width=50)
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
e6.place(x=350,y=270)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=farmer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=insert_farmer_command)
Button.place(x=400, y=400)
def insert_farmer_command():
global root
try:
sql="INSERT INTO farmer values(%s,%s,%s,%s,%s,%s);"
        if len(e1.get())>3:
            #f_id is at most 3 characters in this schema
            invalid('farmer')
else:
vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get(),e6.get()
cur.executemany(sql,[vals])
db.commit()
farmer()
except:
insert_farmer()
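# Note: insert_farmer_command above uses cur.executemany(sql,[vals]) to
# insert a single row. The usual mysql.connector idiom for one row is
# cur.execute with a parameter tuple; an equivalent sketch with the same
# columns:
# cur.execute("INSERT INTO farmer VALUES (%s,%s,%s,%s,%s,%s);",
#             (e1.get(),e2.get(),e3.get(),e4.get(),e5.get(),e6.get()))
# db.commit()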
def invalid(page):
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
if page=='farmer':
label=Label(root,text='Enter valid farmer_id',font=('Times new roman',30),bg='white')
label.place(x=170,y=200)
button=Button(root,text='Re-enter',font=('Times new roman',20),command=insert_farmer)
button.place(x=300,y=400)
elif page=='company':
label=Label(root,text='Enter valid company_id',font=('Times new roman',30),bg='white')
label.place(x=170,y=200)
button=Button(root,text='Re-enter',font=('Times new roman',20),command=insert_company)
button.place(x=300,y=400)
def delete_farmer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=farmer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=delete_farmer_command)
Button.place(x=400, y=400)
def delete_farmer_command():
try:
sql="DELETE FROM farmer WHERE f_id=%s;"
cur.execute(sql,[e1.get()])
db.commit()
farmer()
except:
l=Label(root,text='Invalid Entry',font=('times new roman',15))
l.place(x=100,y=300)
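# Illustrative sketch: the delete pages run with no confirmation. tkinter's
# standard messagebox could guard them (hypothetical helper, not wired into
# the existing commands):
from tkinter import messagebox

def confirm_delete(what):
    return messagebox.askyesno('Confirm','Really delete this '+what+'?')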
def update_farmer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=update)
Button.place(x=300, y=400)
def update():
try:
global e1,e2,e3,e4,e5,e6
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
sql='SELECT * FROM farmer WHERE f_id=%s;'
vals=[e1.get()]
cur.execute(sql,vals)
label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Farmer_name',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Farmer_phone',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Farmer_mail',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Farmer_locality',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
label=Label(root,text='Farmer_address',font=('Times new roman',20),bg='white')
label.place(x=50,y=270)
e1=Entry(root)
e2=Entry(root)
e3=Entry(root)
e4=Entry(root)
e5=Entry(root)
e6=Entry(root)
data=cur.fetchall()
arr=[e1,e2,e3,e4,e5,e6]
count=0
for val in data[0]:
arr[count].insert(0,val)
count+=1
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
e6.place(x=350,y=270)
label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command)
label.place(x=300,y=400)
except:
l=Label(root,text='Invalid Farmer_id',font=('times new roman',15))
l.place(x=100,y=300)
update_farmer()
def update_command():
try:
sql="UPDATE farmer SET f_name=%s,f_phone_no=%s,f_mail=%s,f_locality=%s,f_address=%s WHERE f_id=%s;"
vals=e2.get(),e3.get(),e4.get(),e5.get(),e6.get(),e1.get()
cur.executemany(sql,[vals])
db.commit()
farmer()
except:
update_farmer()
def search_farmer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=farmer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search)
Button.place(x=400, y=400)
def search():
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
try:
sql='SELECT * FROM farmer WHERE f_id=%s;'
val=[e1.get()]
cur.execute(sql,val)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=farmer)
Button.place(x=300, y=400)
for val in cur:
count=0
Y=50
names=['farmer id: ','farmer name: ','farmer phone: ','farmer mail: ','farmer locality: ','farmer address: ']
for i in val:
label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')
label.place(x=10,y=Y)
Y+=50
count+=1
db.commit()
except:
l=Label(root,text='Invalid Farmer Id',font=('times new roman',15))
l.place(x=100,y=300)
search_farmer()
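# Illustrative sketch: search/search_c/search_fe/search_or/search_pa all
# render one fetched row as labelled lines. The shared pattern as a
# hypothetical helper (not called by the existing code):
def show_record(row, names):
    y=50
    for name,value in zip(names,row):
        label=Label(root,text=name+str(value),font=('Times new roman',20),bg='tomato')
        label.place(x=10,y=y)
        y+=50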
#company page
def company():
global root
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Company Table',font=('Times new roman',15),bg='white')
label.place(x=350,y=10)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=entity_page)
Button.place(x=10, y=50)
Button = tk.Button(root, text="Insert", font=("Arial", 15),command=insert_company)
Button.place(x=110, y=50)
Button = tk.Button(root, text="Delete", font=("Arial", 15),command=delete_company)
Button.place(x=210, y=50)
Button = tk.Button(root, text="Update", font=("Arial", 15),command=update_company)
Button.place(x=310, y=50)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_company)
Button.place(x=410, y=50)
view_company()
def view_company():
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("c_id",'c_name','c_address'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('c_id',text="Company Id")
table.heading('c_name',text="Company Name")
table.heading('c_address',text="Company Address")
table['show']='headings'
table.column("c_id",width=100)
table.pack()
cur.execute("SELECT * FROM company;")
data =cur.fetchall()
db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
def insert_company():
global e1,e2,e3,e4,e5,e6
#clean the window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#create the window
label=Label(root,text='Company_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Company_name',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Company_address',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
e1=Entry(root,width=50)
e2=Entry(root,width=50)
e3=Entry(root,width=50)
e1.place(x=350,y=10)
e2.place(x=350,y=110)
e3.place(x=350,y=210)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=company)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=insert_company_command)
Button.place(x=400, y=400)
def insert_company_command():
try:
if len(e1.get())>3:
invalid("company")
else:
sql="INSERT INTO company values(%s,%s,%s);"
vals=e1.get(),e2.get(),e3.get()
cur.executemany(sql,[vals])
db.commit()
company()
except:
insert_company()
def delete_company():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Company Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=company)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=delete_company_command)
Button.place(x=400, y=400)
def delete_company_command():
try:
sql="DELETE FROM company WHERE c_id=%s;"
cur.execute(sql,[int(e1.get())])
db.commit()
company()
except:
l=Label(root,text='Invalid Entry',font=('times new roman',15))
l.place(x=100,y=300)
def update_company():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Company Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=update_c)
Button.place(x=300, y=400)
def update_c():
try:
global e1,e2,e3,e4,e5,e6
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
sql='SELECT * FROM company WHERE c_id=%s;'
vals=[e1.get()]
cur.execute(sql,vals)
label=Label(root,text='Company_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Company_name',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Company_address',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
e1=Entry(root)
e2=Entry(root)
e3=Entry(root)
data=cur.fetchall()
arr=[e1,e2,e3]
count=0
for val in data[0]:
arr[count].insert(0,val)
count+=1
e1.place(x=350,y=10)
e2.place(x=350,y=110)
e3.place(x=350,y=210)
label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_c)
label.place(x=300,y=400)
    except Exception:
        l=Label(root,text='Invalid Company Id',font=('times new roman',15))
        l.place(x=100,y=300)
update_company()
def update_command_c():
try:
sql="UPDATE company SET c_name=%s,c_address=%s WHERE c_id=%s;"
vals=e2.get(),e3.get(),e1.get()
cur.executemany(sql,[vals])
db.commit()
company()
except:
update_company()
def search_company():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Company Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=company)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_c)
Button.place(x=400, y=400)
def search_c():
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
try:
sql='SELECT * FROM company WHERE c_id=%s;'
val=[e1.get()]
cur.execute(sql,val)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=company)
Button.place(x=300, y=400)
for val in cur:
count=0
Y=50
names=['company id: ','company name: ','company address: ']
for i in val:
label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')
label.place(x=10,y=Y)
Y+=50
count+=1
db.commit()
except:
l=Label(root,text='Invalid Company Id',font=('times new roman',15))
l.place(x=100,y=300)
search_company()
#fertilizer page
def fertilizer():
global root
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Fertilizer Table',font=('Times new roman',15),bg='white')
label.place(x=350,y=10)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=entity_page)
Button.place(x=10, y=50)
Button = tk.Button(root, text="Insert", font=("Arial", 15),command=insert_fer)
Button.place(x=110, y=50)
Button = tk.Button(root, text="Delete", font=("Arial", 15),command=delete_fer)
Button.place(x=210, y=50)
Button = tk.Button(root, text="Update", font=("Arial", 15),command=update_fer)
Button.place(x=310, y=50)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_fer)
Button.place(x=410, y=50)
view_fer()
def view_fer():
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("fe_formula",'fe_name','fe_content','fe_price','company_id'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('fe_formula',text="Fertilizer Formula")
table.heading('fe_name',text="Fertilizer name")
table.heading('fe_content',text="Fertilizer content")
table.heading('fe_price',text="Fertilizer price")
table.heading('company_id',text="Company_id")
#table.heading('f_address',text="Farmer Address")
table['show']='headings'
#table.column("f_id",width=100)
table.pack()
cur.execute("SELECT * FROM fertilizer;")
data =cur.fetchall()
db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_fer():
global e1,e2,e3,e4,e5,e6
#clean the window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#create the window
    label=Label(root,text='Fertilizer formula',font=('Times new roman',20),bg='white')
    label.place(x=50,y=10)
    label=Label(root,text='Fertilizer name',font=('Times new roman',20),bg='white')
    label.place(x=50,y=60)
    label=Label(root,text='Fertilizer content',font=('Times new roman',20),bg='white')
    label.place(x=50,y=110)
    label=Label(root,text='Fertilizer price',font=('Times new roman',20),bg='white')
    label.place(x=50,y=160)
    label=Label(root,text='Company id',font=('Times new roman',20),bg='white')
    label.place(x=50,y=210)
e1=Entry(root,width=50)
e2=Entry(root,width=50)
e3=Entry(root,width=50)
e4=Entry(root,width=50)
e5=Entry(root,width=50)
#e6=Entry(root,width=50)
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
#e6.place(x=350,y=270)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=fertilizer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=insert_fer_command)
Button.place(x=400, y=400)
def insert_fer_command():
try:
sql="INSERT INTO fertilizer values(%s,%s,%s,%s,%s);"
vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get()
cur.executemany(sql,[vals])
db.commit()
fertilizer()
except:
insert_fer()
def delete_fer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Fertilizer formula:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=fertilizer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=delete_fer_command)
Button.place(x=400, y=400)
def delete_fer_command():
try:
sql="DELETE FROM fertilizer WHERE fe_formula=%s;"
cur.execute(sql,[e1.get()])
db.commit()
fertilizer()
except:
l=Label(root,text='Invalid Entry',font=('times new roman',15))
l.place(x=100,y=300)
def update_fer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
    label=Label(root,text='Fertilizer formula:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=update_fe)
Button.place(x=300, y=400)
def update_fe():
try:
global e1,e2,e3,e4,e5,e6
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
sql='SELECT * FROM fertilizer WHERE fe_formula=%s;'
vals=[e1.get()]
cur.execute(sql,vals)
        label=Label(root,text='Fertilizer formula',font=('Times new roman',20),bg='white')
        label.place(x=50,y=10)
        label=Label(root,text='Fertilizer name',font=('Times new roman',20),bg='white')
        label.place(x=50,y=60)
        label=Label(root,text='Fertilizer content',font=('Times new roman',20),bg='white')
        label.place(x=50,y=110)
        label=Label(root,text='Fertilizer price',font=('Times new roman',20),bg='white')
        label.place(x=50,y=160)
        label=Label(root,text='Company_id',font=('Times new roman',20),bg='white')
        label.place(x=50,y=210)
e1=Entry(root)
e2=Entry(root)
e3=Entry(root)
e4=Entry(root)
e5=Entry(root)
#e6=Entry(root)
data=cur.fetchall()
        arr=[e1,e2,e3,e4,e5]   #five columns; e6 is unused here
count=0
for val in data[0]:
arr[count].insert(0,val)
count+=1
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
#e6.place(x=350,y=270)
label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_fe)
label.place(x=300,y=400)
    except Exception:
        l=Label(root,text='Invalid Fertilizer formula',font=('times new roman',15))
        l.place(x=100,y=300)
update_fer()
def update_command_fe():
sql="UPDATE fertilizer SET fe_name=%s,fe_content=%s,fe_price=%s,company_id=%s WHERE fe_formula=%s;"
vals=e2.get(),e3.get(),e4.get(),e5.get(),e1.get()
cur.executemany(sql,[vals])
db.commit()
fertilizer()
def search_fer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
    label=Label(root,text='Fertilizer formula:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=fertilizer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_fe)
Button.place(x=400, y=400)
def search_fe():
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
try:
sql='SELECT * FROM fertilizer WHERE fe_formula=%s;'
val=[e1.get()]
cur.execute(sql,val)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=fertilizer)
Button.place(x=300, y=400)
for val in cur:
count=0
Y=50
names=['fertilizer formula: ','fertilizer name: ','fertilizer content: ','fertilizer price: ','company_id: ']
for i in val:
label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')
label.place(x=10,y=Y)
Y+=50
count+=1
db.commit()
except:
l=Label(root,text='Invalid Fertilizer formula',font=('times new roman',15))
l.place(x=100,y=300)
search_fer()
#order page
def orders():
global root
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Orders Table',font=('Times new roman',15),bg='white')
label.place(x=350,y=10)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=entity_page)
Button.place(x=10, y=50)
Button = tk.Button(root, text="Insert", font=("Arial", 15),command=insert_ord)
Button.place(x=110, y=50)
Button = tk.Button(root, text="Delete", font=("Arial", 15),command=delete_ord)
Button.place(x=210, y=50)
Button = tk.Button(root, text="Update", font=("Arial", 15),command=update_ord)
Button.place(x=310, y=50)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_ord)
Button.place(x=410, y=50)
view_ord()
def view_ord():
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("or_id",'or_date','or_fid','or_formula','or_to'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('or_id',text="Order Id")
table.heading('or_date',text="Order Date")
table.heading('or_fid',text="Ordered Farmer Id")
    table.heading('or_formula',text="Order (item) formula")
table.heading('or_to',text="Order to")
#table.heading('f_address',text="Farmer Address")
table['show']='headings'
#table.column("f_id",width=100)
table.pack()
cur.execute("SELECT * FROM orders;")
data =cur.fetchall()
db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_ord():
global e1,e2,e3,e4,e5,e6
#clean the window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#create the window
label=Label(root,text='Order Id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Order date',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Order FID',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Order formula',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Order to',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
e1=Entry(root,width=50)
e2=Entry(root,width=50)
e3=Entry(root,width=50)
e4=Entry(root,width=50)
e5=Entry(root,width=50)
#e6=Entry(root,width=50)
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e2.insert(0,datetime.now())
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
#e6.place(x=350,y=270)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=orders)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=insert_ord_command)
Button.place(x=400, y=400)
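# Note: e2 above is prefilled with datetime.now(), whose str() form includes
# microseconds. MySQL generally accepts that for DATETIME columns, but an
# explicit format is safer; a minimal sketch:
# e2.insert(0, datetime.now().strftime('%Y-%m-%d %H:%M:%S'))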
def insert_ord_command():
try:
sql="INSERT INTO orders values(%s,%s,%s,%s,%s);"
vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get()
cur.executemany(sql,[vals])
db.commit()
orders()
except:
insert_ord()
def delete_ord():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Order Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=orders)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=delete_ord_command)
Button.place(x=400, y=400)
def delete_ord_command():
try:
sql="DELETE FROM orders WHERE or_id=%s;"
cur.execute(sql,[e1.get()])
db.commit()
orders()
except:
l=Label(root,text='Invalid Entry',font=('times new roman',15))
l.place(x=100,y=300)
def update_ord():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Order Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=update_or)
Button.place(x=300, y=400)
def update_or():
try:
global e1,e2,e3,e4,e5,e6
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
sql='SELECT * FROM orders WHERE or_id=%s;'
vals=[e1.get()]
cur.execute(sql,vals)
label=Label(root,text='Order Id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Order Date',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Order f_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Order formula',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Order to',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
e1=Entry(root)
e2=Entry(root)
e3=Entry(root)
e4=Entry(root)
e5=Entry(root)
#e6=Entry(root)
data=cur.fetchall()
        arr=[e1,e2,e3,e4,e5]   #five columns; e6 is unused here
count=0
for val in data[0]:
arr[count].insert(0,val)
count+=1
e1.place(x=350,y=10)
e2.place(x=350,y=60)
#e2.insert(0,datetime.now())
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
#e6.place(x=350,y=270)
label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_ord)
label.place(x=300,y=400)
except:
l=Label(root,text='Invalid Order_id',font=('times new roman',15))
l.place(x=100,y=300)
update_ord()
def update_command_ord():
sql="UPDATE orders SET or_date=%s,or_fid=%s,or_formula=%s,or_to=%s WHERE or_id=%s;"
vals=e2.get(),e3.get(),e4.get(),e5.get(),e1.get()
cur.executemany(sql,[vals])
db.commit()
orders()
def search_ord():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Order Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=orders)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_or)
Button.place(x=400, y=400)
def search_or():
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
try:
sql='SELECT * FROM orders WHERE or_id=%s;'
val=[e1.get()]
cur.execute(sql,val)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=orders)
Button.place(x=300, y=400)
for val in cur:
count=0
Y=50
names=['order Id: ','Order date: ','Order fid: ','Order formula: ','order to: ']
for i in val:
label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')
label.place(x=10,y=Y)
Y+=50
count+=1
db.commit()
except:
l=Label(root,text='Invalid order id',font=('times new roman',15))
l.place(x=100,y=300)
search_ord()
#payment page
def payment():
global root
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Payment Table',font=('Times new roman',15),bg='white')
label.place(x=350,y=10)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=entity_page)
Button.place(x=10, y=50)
Button = tk.Button(root, text="Insert", font=("Arial", 15),command=insert_pay)
Button.place(x=110, y=50)
Button = tk.Button(root, text="Delete", font=("Arial", 15),command=delete_pay)
Button.place(x=210, y=50)
Button = tk.Button(root, text="Update", font=("Arial", 15),command=update_pay)
Button.place(x=310, y=50)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_pay)
Button.place(x=410, y=50)
view_pay()
def view_pay():
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("trans_id",'p_f_id','p_date','p_amount','p_method'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('trans_id',text="Transaction Id")
table.heading('p_f_id',text="Farmer Id")
table.heading('p_date',text="Payment Date")
table.heading('p_amount',text="Amount")
table.heading('p_method',text="Payment Method")
#table.heading('f_address',text="Farmer Address")
table['show']='headings'
#table.column("f_id",width=100)
table.pack()
cur.execute("SELECT * FROM payment;")
data =cur.fetchall()
db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_pay():
global e1,e2,e3,e4,e5,e6
#clean the window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#create the window
label=Label(root,text='Transaction Id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Transaction farmer id',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Transaction date',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Transaction amount',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Transaction method',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
e1=Entry(root,width=50)
e2=Entry(root,width=50)
e3=Entry(root,width=50)
e4=Entry(root,width=50)
e5=Entry(root,width=50)
#e6=Entry(root,width=50)
e1.place(x=350,y=10)
e2.place(x=350,y=60)
#e2.insert(0,datetime.now())
e3.place(x=350,y=110)
e3.insert(0,datetime.now())
e4.place(x=350,y=160)
#e5.place(x=350,y=210)
e5 = StringVar(root)
e5.set("Debit card") # default value
    w= OptionMenu(root, e5, "Debit card", "Credit Card", "UPI", "Cheque","Cash")
w.place(x=350,y=210)
#mainloop()
#e6.place(x=350,y=270)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=payment)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=insert_pay_command)
Button.place(x=400, y=400)
def insert_pay_command():
try:
sql="INSERT INTO payment values(%s,%s,%s,%s,%s);"
vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get()
cur.executemany(sql,[vals])
db.commit()
payment()
except:
insert_pay()
def delete_pay():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Transaction Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=payment)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=delete_pay_command)
Button.place(x=400, y=400)
def delete_pay_command():
try:
sql="DELETE FROM payment WHERE trans_id=%s;"
cur.execute(sql,[e1.get()])
db.commit()
payment()
except:
l=Label(root,text='Invalid Entry',font=('times new roman',15))
l.place(x=100,y=300)
def update_pay():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Transaction Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=update_pa)
Button.place(x=300, y=400)
def update_pa():
try:
global e1,e2,e3,e4,e5,e6
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
sql='SELECT * FROM payment WHERE trans_id=%s;'
vals=[e1.get()]
cur.execute(sql,vals)
label=Label(root,text='Transaction Id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Transaction date',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Transaction amount',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Transaction method',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
e1=Entry(root)
e2=Entry(root)
e3=Entry(root)
e4=Entry(root)
        data=cur.fetchall()
        #only the first four columns go into Entry widgets; the payment
        #method is preselected in the OptionMenu created below
        arr=[e1,e2,e3,e4]
        count=0
        for val in data[0][:4]:
            arr[count].insert(0,val)
            count+=1
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e3.place(x=350,y=110)
#e3.insert(0,datetime.now())
e4.place(x=350,y=160)
#e5.place(x=350,y=210)
#e6.place(x=350,y=270)
        e5 = StringVar(root)
        e5.set(data[0][4]) # preselect the stored payment method
        w= OptionMenu(root, e5, "Debit card", "Credit Card", "UPI", "Cheque","Cash")
w.place(x=350,y=210)
label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_pay)
label.place(x=300,y=400)
    except Exception:
        l=Label(root,text='Invalid Transaction Id',font=('times new roman',15))
        l.place(x=100,y=300)
update_pay()
def update_command_pay():
sql="UPDATE payment SET p_f_id=%s,p_date=%s,p_amount=%s,p_method=%s WHERE trans_id=%s;"
vals=e2.get(),e3.get(),e4.get(),e5.get(),e1.get()
cur.executemany(sql,[vals])
db.commit()
payment()
def search_pay():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
    #window
label=Label(root,text='Transaction Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=payment)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_pa)
Button.place(x=400, y=400)
def search_pa():
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
try:
sql='SELECT * FROM payment WHERE trans_id=%s;'
val=[e1.get()]
cur.execute(sql,val)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=payment)
Button.place(x=300, y=400)
for val in cur:
count=0
Y=50
names=['Transaction Id: ','Transaction fid: ','Transaction date: ','Transaction amount: ','Transaction method: ']
for i in val:
label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')
label.place(x=10,y=Y)
Y+=50
count+=1
db.commit()
except:
l=Label(root,text='Invalid order id',font=('times new roman',15))
l.place(x=100,y=300)
search_pay()
First_page(root)
root.mainloop()
table.heading('trans_id', text='Transaction Id')\n table.heading('p_f_id', text='Farmer Id')\n table.heading('p_date', text='Payment Date')\n table.heading('p_amount', text='Amount')\n table.heading('p_method', text='Payment Method')\n table['show'] = 'headings'\n table.pack()\n cur.execute('SELECT * FROM payment;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_pay():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Transaction farmer id', font=(\n 'Times new roman', 20), bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Transaction date', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Transaction amount', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Transaction method', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e3.insert(0, datetime.now())\n e4.place(x=350, y=160)\n e5 = StringVar(root)\n e5.set('Debit card')\n w = OptionMenu(root, e5, 'Credit Card', 'UPI', 'Cheque', 'Cash')\n w.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_pay_command)\n Button.place(x=400, y=400)\n\n\ndef insert_pay_command():\n try:\n sql = 'INSERT INTO payment values(%s,%s,%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n cur.executemany(sql, [vals])\n db.commit()\n payment()\n except:\n insert_pay()\n\n\ndef delete_pay():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n ), bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n delete_pay_command)\n Button.place(x=400, y=400)\n\n\n<mask token>\n\n\ndef search_pay():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n ), bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_pa)\n Button.place(x=400, y=400)\n\n\n<mask token>\n",
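# The insert_*_command helpers above push a single row through
# cur.executemany(sql, [vals]) and catch every failure with a bare
# except: that silently re-opens the form. A minimal sketch of the same
# commit path using cur.execute() and a targeted error handler, assuming
# the farmer_app schema used throughout this file (the function name and
# the sample row are illustrative, not from the source):
import mysql.connector
from mysql.connector import Error


def insert_order_row(db, row):
    """Insert one row into the orders table with a parameterized query."""
    cur = db.cursor()
    try:
        # execute() is enough for one row; executemany() is meant for batches.
        cur.execute('INSERT INTO orders values(%s,%s,%s,%s,%s);', row)
        db.commit()
        return True
    except Error as exc:
        db.rollback()  # leave the table unchanged if the insert fails
        print('insert failed:', exc)
        return False
    finally:
        cur.close()


# Usage (hypothetical values):
# db = mysql.connector.connect(host='localhost', user='root',
#                              passwd='bhushi', database='farmer_app')
# insert_order_row(db, ('O1', '2021-01-01', 'F1', 'NPK-20', 'C1'))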
"step-2": "<mask token>\n\n\ndef First_page(root):\n global T1, T2, T3\n frame = Frame(root, height=500, width=800, bg='ivory')\n frame.pack()\n label = Label(root, text='WELCOME TO AGRI MARKET', font=(\n 'Times new roman', 25))\n label.place(x=200, y=50)\n button = Button(root, text='LogIn', font=('times new roman', 20),\n command=check_pass, bg='green')\n button.place(x=350, y=350)\n L1 = tk.Label(root, text='Username', font=('Arial Bold', 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width=30, bd=5)\n T1.place(x=280, y=200)\n L2 = tk.Label(root, text='Password', font=('Arial Bold', 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width=30, show='*', bd=5)\n T2.place(x=280, y=250)\n reg_button = Button(root, text='Register', font=('Arial Bold', 15), bg=\n 'blue', command=create_pass)\n reg_button.place(x=340, y=400)\n\n\n<mask token>\n\n\ndef create_pass():\n global root, T1, T2, T3\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'ivory')\n label.place(x=0, y=0)\n L1 = tk.Label(root, text='Username', font=('Arial Bold', 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width=30, bd=5)\n T1.place(x=380, y=200)\n L2 = tk.Label(root, text='Password', font=('Arial Bold', 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width=30, show='*', bd=5)\n T2.place(x=380, y=250)\n L2 = tk.Label(root, text='Confirm Password', font=('Arial Bold', 15),\n bg='ivory')\n L2.place(x=150, y=300)\n T3 = tk.Entry(root, width=30, show='*', bd=5)\n T3.place(x=380, y=300)\n reg_button = Button(root, text='Done', font=('Arial Bold', 15), bg=\n 'blue', command=add_pass)\n reg_button.place(x=440, y=400)\n\n\ndef add_pass():\n global root, T1, T2, T3\n if T2.get() != T3.get():\n label = Label(root, text='Incorrect Password. 
Enter again', font=(\n 'times new roman', 20))\n label.place(x=100, y=100)\n else:\n try:\n with open('password.txt', 'r') as f:\n data = f.read()\n with open('password.txt', 'w') as f:\n f.write(data + '\\n')\n f.write(T1.get() + '=' + T2.get())\n entity_page()\n except:\n with open('password.txt', 'w') as f:\n f.write(T1.get() + '=' + T2.get())\n entity_page()\n\n\ndef entity_page():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'ivory')\n label.place(x=0, y=0)\n label = Label(root, text='WELCOME TO AGRI MARKET ', font=(\n 'Times new roman', 20), bg='blue')\n label.place(x=200, y=20)\n label = Label(root, text='Choose the Entity ', font=('Times new roman',\n 20), bg='white')\n label.place(x=250, y=100)\n Button = tk.Button(root, text='Farmers', font=('Arial', 15), command=farmer\n )\n Button.place(x=100, y=150 + 25)\n Button = tk.Button(root, text='Company', font=('Arial', 15), command=\n company)\n Button.place(x=300, y=150 + 25)\n Button = tk.Button(root, text='Fertilizer', font=('Arial', 15), command\n =fertilizer)\n Button.place(x=500, y=150 + 25)\n Button = tk.Button(root, text='Order', font=('Arial', 15), command=orders)\n Button.place(x=200, y=300 + 25)\n Button = tk.Button(root, text='Payment', font=('Arial', 15), command=\n payment)\n Button.place(x=400, y=300 + 25)\n Button = tk.Button(root, text='GET BOOKING HISTORY', font=('Arial', 15),\n command=history)\n Button.place(x=200, y=400 + 25)\n\n\ndef history():\n global root, cur, db\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n cur.execute('CALL getMonth(%s);', [datetime.today().strftime('%m')])\n data = cur.fetchall()\n label = Label(root, text='The Transaction History of this month', font=\n ('Arial', 15))\n label.place(x=200, y=20)\n button = Button(root, text='BACK', command=entity_page)\n button.place(x=20, y=20)\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('trans_id', 'p_f_id', 'p_date',\n 'p_amount', 'p_method'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('trans_id', text='Transaction Id')\n table.heading('p_f_id', text='Farmer Id')\n table.heading('p_date', text='Payment Date')\n table.heading('p_amount', text='Amount')\n table.heading('p_method', text='Payment Method')\n table['show'] = 'headings'\n table.pack()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n db.close()\n db = mysql.connector.connect(host='localhost', user='root', passwd=\n 'bhushi', database='farmer_app')\n cur = db.cursor()\n\n\n<mask token>\n\n\ndef view_farmer():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('f_id', 'f_name', 'f_phone',\n 'f_mail', 'f_locality', 'f_address'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('f_id', text='Farmer Id')\n table.heading('f_name', 
text='Farmer Name')\n table.heading('f_phone', text='Farmer Phone')\n table.heading('f_mail', text='Farmer Mail')\n table.heading('f_locality', text='Farmer Locality')\n table.heading('f_address', text='Farmer Address')\n table['show'] = 'headings'\n table.column('f_id', width=100)\n table.pack()\n cur.execute('SELECT * FROM farmer;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_farmer():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer_id', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=10)\n label = Label(root, text='Farmer_name', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Farmer_phone', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Farmer_mail', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Farmer_locality', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=210)\n label = Label(root, text='Farmer_address', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=270)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e6 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n e6.place(x=350, y=270)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=farmer)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_farmer_command)\n Button.place(x=400, y=400)\n\n\n<mask token>\n\n\ndef invalid(page):\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n if page == 'farmer':\n label = Label(root, text='Enter valid farmer_id', font=(\n 'Times new roman', 30), bg='white')\n label.place(x=170, y=200)\n button = Button(root, text='Re-enter', font=('Times new roman', 20),\n command=insert_farmer)\n button.place(x=300, y=400)\n elif page == 'company':\n label = Label(root, text='Enter valid company_id', font=(\n 'Times new roman', 30), bg='white')\n label.place(x=170, y=200)\n button = Button(root, text='Re-enter', font=('Times new roman', 20),\n command=insert_company)\n button.place(x=300, y=400)\n\n\n<mask token>\n\n\ndef delete_farmer_command():\n try:\n sql = 'DELETE FROM farmer WHERE f_id=%s;'\n cur.execute(sql, [e1.get()])\n db.commit()\n farmer()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\ndef update_farmer():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Id:', font=('Times new roman', 20), bg\n ='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=update)\n Button.place(x=300, y=400)\n\n\n<mask token>\n\n\ndef update_command():\n try:\n sql = (\n 'UPDATE farmer SET f_name=%s,f_phone_no=%s,f_mail=%s,f_locality=%s,f_address=%s WHERE f_id=%s;'\n )\n vals = e2.get(), e3.get(), e4.get(), e5.get(), e6.get(), e1.get()\n cur.executemany(sql, [vals])\n db.commit()\n farmer()\n 
except:\n update_farmer()\n\n\ndef search_farmer():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Id:', font=('Times new roman', 20), bg\n ='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=farmer)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=search)\n Button.place(x=400, y=400)\n\n\ndef search():\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n try:\n sql = 'SELECT * FROM farmer WHERE f_id=%s;'\n val = [e1.get()]\n cur.execute(sql, val)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=farmer)\n Button.place(x=300, y=400)\n for val in cur:\n count = 0\n Y = 50\n names = ['farmer id: ', 'farmer name: ', 'farmer phone: ',\n 'farmer mail: ', 'farmer locality: ', 'farmer address: ']\n for i in val:\n label = Label(root, text=names[count] + str(i), font=(\n 'Times new roman', 20), bg='tomato')\n label.place(x=10, y=Y)\n Y += 50\n count += 1\n db.commit()\n except:\n l = Label(root, text='Invalid Farmer Id', font=('times new roman', 15))\n l.place(x=100, y=300)\n search_farmer()\n\n\ndef company():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company Table', font=('Times new roman', 15),\n bg='white')\n label.place(x=350, y=10)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n entity_page)\n Button.place(x=10, y=50)\n Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n insert_company)\n Button.place(x=110, y=50)\n Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n delete_company)\n Button.place(x=210, y=50)\n Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n update_company)\n Button.place(x=310, y=50)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_company)\n Button.place(x=410, y=50)\n view_company()\n\n\ndef view_company():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('c_id', 'c_name', 'c_address'),\n xscrollcommand=x_scroll.set, yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('c_id', text='Company Id')\n table.heading('c_name', text='Company Name')\n table.heading('c_address', text='Company Address')\n table['show'] = 'headings'\n table.column('c_id', width=100)\n table.pack()\n cur.execute('SELECT * FROM company;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\ndef insert_company():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company_id', font=('Times new roman', 20), bg\n ='white')\n label.place(x=50, y=10)\n label = Label(root, text='Company_name', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Company_address', font=('Times new roman', 20\n ), 
bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=110)\n e3.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_company_command)\n Button.place(x=400, y=400)\n\n\ndef insert_company_command():\n try:\n if len(e1.get()) > 3:\n invalid('company')\n else:\n sql = 'INSERT INTO company values(%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get()\n cur.executemany(sql, [vals])\n db.commit()\n company()\n except:\n insert_company()\n\n\ndef delete_company():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company Id:', font=('Times new roman', 20),\n bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n delete_company_command)\n Button.place(x=400, y=400)\n\n\ndef delete_company_command():\n try:\n sql = 'DELETE FROM company WHERE c_id=%s;'\n cur.execute(sql, [int(e1.get())])\n db.commit()\n company()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\n<mask token>\n\n\ndef update_c():\n try:\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n bg='tomato')\n label.place(x=0, y=0)\n sql = 'SELECT * FROM company WHERE c_id=%s;'\n vals = [e1.get()]\n cur.execute(sql, vals)\n label = Label(root, text='Company_id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Company_name', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Company_address', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root)\n e2 = Entry(root)\n e3 = Entry(root)\n data = cur.fetchall()\n arr = [e1, e2, e3]\n count = 0\n for val in data[0]:\n arr[count].insert(0, val)\n count += 1\n e1.place(x=350, y=10)\n e2.place(x=350, y=110)\n e3.place(x=350, y=210)\n label = Button(root, text='Modify', font=('Times new roman', 20),\n bg='blue', command=update_command_c)\n label.place(x=300, y=400)\n except:\n l = Label(root, text='Invalid Farmer_id', font=('times new roman', 15))\n l.place(x=100, y=300)\n update_company()\n\n\n<mask token>\n\n\ndef search_company():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company Id:', font=('Times new roman', 20),\n bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_c)\n Button.place(x=400, y=400)\n\n\ndef search_c():\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n try:\n sql = 'SELECT * FROM company WHERE c_id=%s;'\n val = [e1.get()]\n cur.execute(sql, val)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=company\n )\n Button.place(x=300, y=400)\n for val in cur:\n count = 0\n 
Y = 50\n            names = ['company id: ', 'company name: ', 'company address: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid Company Id', font=('times new roman', 15)\n            )\n        l.place(x=100, y=300)\n        search_company()\n\n\ndef fertilizer():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer Table', font=('Times new roman', \n        15), bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_fer)\n    Button.place(x=110, y=50)\n    Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n        delete_fer)\n    Button.place(x=210, y=50)\n    Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n        update_fer)\n    Button.place(x=310, y=50)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_fer)\n    Button.place(x=410, y=50)\n    view_fer()\n\n\ndef view_fer():\n    frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n    frame.place(x=10, y=100, width=750, height=400)\n    x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n    y_scroll = Scrollbar(frame, orient=VERTICAL)\n    table = ttk.Treeview(frame, columns=('fe_formula', 'fe_name',\n        'fe_content', 'fe_price', 'company_id'), xscrollcommand=x_scroll.\n        set, yscrollcommand=y_scroll.set)\n    x_scroll.pack(side=BOTTOM, fill=X)\n    y_scroll.pack(side=RIGHT, fill=Y)\n    x_scroll.config(command=table.xview)\n    y_scroll.config(command=table.yview)\n    table.heading('fe_formula', text='Fertilizer Formula')\n    table.heading('fe_name', text='Fertilizer name')\n    table.heading('fe_content', text='Fertilizer content')\n    table.heading('fe_price', text='Fertilizer price')\n    table.heading('company_id', text='Company_id')\n    table['show'] = 'headings'\n    table.pack()\n    cur.execute('SELECT * FROM fertilizer;')\n    data = cur.fetchall()\n    db.commit()\n    if len(data) != 0:\n        for row in data:\n            table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_fer_command():\n    try:\n        sql = 'INSERT INTO fertilizer values(%s,%s,%s,%s,%s);'\n        vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n        cur.executemany(sql, [vals])\n        db.commit()\n        fertilizer()\n    except:\n        insert_fer()\n\n\ndef delete_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        fertilizer)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n        delete_fer_command)\n    Button.place(x=400, y=400)\n\n\ndef delete_fer_command():\n    try:\n        sql = 'DELETE FROM fertilizer WHERE fe_formula=%s;'\n        cur.execute(sql, [e1.get()])\n        db.commit()\n        fertilizer()\n    except:\n        l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n        l.place(x=100, y=300)\n\n\ndef update_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, 
y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='OK', font=('Arial', 15), command=update_fe)\n    Button.place(x=300, y=400)\n\n\ndef update_fe():\n    try:\n        global e1, e2, e3, e4, e5, e6\n        label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n            bg='tomato')\n        label.place(x=0, y=0)\n        sql = 'SELECT * FROM fertilizer WHERE fe_formula=%s;'\n        vals = [e1.get()]\n        cur.execute(sql, vals)\n        label = Label(root, text='Fertilizer formula', font=(\n            'Times new roman', 20), bg='white')\n        label.place(x=50, y=10)\n        label = Label(root, text='Fertilizer name', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=60)\n        label = Label(root, text='Fertilizer content', font=(\n            'Times new roman', 20), bg='white')\n        label.place(x=50, y=110)\n        label = Label(root, text='Fertilizer price', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=160)\n        label = Label(root, text='company_id', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=210)\n        e1 = Entry(root)\n        e2 = Entry(root)\n        e3 = Entry(root)\n        e4 = Entry(root)\n        e5 = Entry(root)\n        data = cur.fetchall()\n        arr = [e1, e2, e3, e4, e5]\n        count = 0\n        for val in data[0]:\n            arr[count].insert(0, val)\n            count += 1\n        e1.place(x=350, y=10)\n        e2.place(x=350, y=60)\n        e3.place(x=350, y=110)\n        e4.place(x=350, y=160)\n        e5.place(x=350, y=210)\n        label = Button(root, text='Modify', font=('Times new roman', 20),\n            bg='blue', command=update_command_fe)\n        label.place(x=300, y=400)\n    except:\n        l = Label(root, text='Invalid Fertilizer formula', font=(\n            'times new roman', 15))\n        l.place(x=100, y=300)\n        update_fer()\n\n\n<mask token>\n\n\ndef search_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        fertilizer)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_fe)\n    Button.place(x=400, y=400)\n\n\ndef search_fe():\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    try:\n        sql = 'SELECT * FROM fertilizer WHERE fe_formula=%s;'\n        val = [e1.get()]\n        cur.execute(sql, val)\n        Button = tk.Button(root, text='OK', font=('Arial', 15), command=\n            fertilizer)\n        Button.place(x=300, y=400)\n        for val in cur:\n            count = 0\n            Y = 50\n            names = ['fertilizer formula: ', 'fertilizer name: ',\n                'fertilizer content: ', 'fertilizer price: ', 'company_id: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid Fertilizer formula', font=(\n            'times new roman', 15))\n        l.place(x=100, y=300)\n        search_fer()\n\n\ndef orders():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Orders Table', font=('Times new roman', 15),\n        bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_ord)\n    Button.place(x=110, y=50)\n    Button = tk.Button(root, text='Delete', font=('Arial', 15), 
command=\n delete_ord)\n Button.place(x=210, y=50)\n Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n update_ord)\n Button.place(x=310, y=50)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_ord)\n Button.place(x=410, y=50)\n view_ord()\n\n\ndef view_ord():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('or_id', 'or_date', 'or_fid',\n 'or_formula', 'or_to'), xscrollcommand=x_scroll.set, yscrollcommand\n =y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('or_id', text='Order Id')\n table.heading('or_date', text='Order Date')\n table.heading('or_fid', text='Ordered Farmer Id')\n table.heading('or_formula', text='Order (item)formula')\n table.heading('or_to', text='Order to')\n table['show'] = 'headings'\n table.pack()\n cur.execute('SELECT * FROM orders;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_ord():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=10)\n label = Label(root, text='Order date', font=('Times new roman', 20), bg\n ='white')\n label.place(x=50, y=60)\n label = Label(root, text='Order FID', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=110)\n label = Label(root, text='Order formula', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Order to', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e2.insert(0, datetime.now())\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_ord_command)\n Button.place(x=400, y=400)\n\n\ndef insert_ord_command():\n try:\n sql = 'INSERT INTO orders values(%s,%s,%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n cur.executemany(sql, [vals])\n db.commit()\n orders()\n except:\n insert_ord()\n\n\ndef delete_ord():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n 'tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n delete_ord_command)\n Button.place(x=400, y=400)\n\n\ndef delete_ord_command():\n try:\n sql = 'DELETE FROM orders WHERE or_id=%s;'\n cur.execute(sql, [e1.get()])\n db.commit()\n orders()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\n<mask 
token>\n\n\ndef update_or():\n try:\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n bg='tomato')\n label.place(x=0, y=0)\n sql = 'SELECT * FROM orders WHERE or_id=%s;'\n vals = [e1.get()]\n cur.execute(sql, vals)\n label = Label(root, text='Order Id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Order Date', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Order f_id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Order formula', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Order to', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root)\n e2 = Entry(root)\n e3 = Entry(root)\n e4 = Entry(root)\n e5 = Entry(root)\n data = cur.fetchall()\n arr = [e1, e2, e3, e4, e5, e6]\n count = 0\n for val in data[0]:\n arr[count].insert(0, val)\n count += 1\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n label = Button(root, text='Modify', font=('Times new roman', 20),\n bg='blue', command=update_command_ord)\n label.place(x=300, y=400)\n except:\n l = Label(root, text='Invalid Order_id', font=('times new roman', 15))\n l.place(x=100, y=300)\n update_ord()\n\n\ndef update_command_ord():\n sql = (\n 'UPDATE orders SET or_date=%s,or_fid=%s,or_formula=%s,or_to=%s WHERE or_id=%s;'\n )\n vals = e2.get(), e3.get(), e4.get(), e5.get(), e1.get()\n cur.executemany(sql, [vals])\n db.commit()\n orders()\n\n\ndef search_ord():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n 'tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_or)\n Button.place(x=400, y=400)\n\n\ndef search_or():\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n try:\n sql = 'SELECT * FROM orders WHERE or_id=%s;'\n val = [e1.get()]\n cur.execute(sql, val)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=orders)\n Button.place(x=300, y=400)\n for val in cur:\n count = 0\n Y = 50\n names = ['order Id: ', 'Order date: ', 'Order fid: ',\n 'Order formula: ', 'order to: ']\n for i in val:\n label = Label(root, text=names[count] + str(i), font=(\n 'Times new roman', 20), bg='tomato')\n label.place(x=10, y=Y)\n Y += 50\n count += 1\n db.commit()\n except:\n l = Label(root, text='Invalid order id', font=('times new roman', 15))\n l.place(x=100, y=300)\n search_ord()\n\n\ndef payment():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Payment Table', font=('Times new roman', 15),\n bg='white')\n label.place(x=350, y=10)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n entity_page)\n Button.place(x=10, y=50)\n Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n insert_pay)\n Button.place(x=110, y=50)\n Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n delete_pay)\n Button.place(x=210, 
y=50)\n Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n update_pay)\n Button.place(x=310, y=50)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_pay)\n Button.place(x=410, y=50)\n view_pay()\n\n\ndef view_pay():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('trans_id', 'p_f_id', 'p_date',\n 'p_amount', 'p_method'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('trans_id', text='Transaction Id')\n table.heading('p_f_id', text='Farmer Id')\n table.heading('p_date', text='Payment Date')\n table.heading('p_amount', text='Amount')\n table.heading('p_method', text='Payment Method')\n table['show'] = 'headings'\n table.pack()\n cur.execute('SELECT * FROM payment;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_pay():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Transaction farmer id', font=(\n 'Times new roman', 20), bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Transaction date', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Transaction amount', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Transaction method', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e3.insert(0, datetime.now())\n e4.place(x=350, y=160)\n e5 = StringVar(root)\n e5.set('Debit card')\n w = OptionMenu(root, e5, 'Credit Card', 'UPI', 'Cheque', 'Cash')\n w.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_pay_command)\n Button.place(x=400, y=400)\n\n\ndef insert_pay_command():\n try:\n sql = 'INSERT INTO payment values(%s,%s,%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n cur.executemany(sql, [vals])\n db.commit()\n payment()\n except:\n insert_pay()\n\n\ndef delete_pay():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n ), bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n delete_pay_command)\n Button.place(x=400, y=400)\n\n\n<mask token>\n\n\ndef search_pay():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n 
label.place(x=0, y=0)\n label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n ), bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_pa)\n Button.place(x=400, y=400)\n\n\n<mask token>\n",
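# Every view_* function in this file repeats the same scaffold: a Frame,
# two Scrollbars wired to a ttk.Treeview, per-column headings, then a
# SELECT * whose rows are inserted into the tree. A sketch that factors
# the pattern out (the helper name and its parameters are illustrative,
# not from the source):
import tkinter as tk
from tkinter import ttk


def show_table(parent, cur, query, columns, headings):
    """Run `query` and list the result rows in a scrollable Treeview."""
    frame = tk.Frame(parent, bd=5, relief=tk.RIDGE, bg='tomato')
    frame.place(x=10, y=100, width=750, height=400)
    x_scroll = tk.Scrollbar(frame, orient=tk.HORIZONTAL)
    y_scroll = tk.Scrollbar(frame, orient=tk.VERTICAL)
    table = ttk.Treeview(frame, columns=columns,
                         xscrollcommand=x_scroll.set,
                         yscrollcommand=y_scroll.set)
    x_scroll.pack(side=tk.BOTTOM, fill=tk.X)
    y_scroll.pack(side=tk.RIGHT, fill=tk.Y)
    x_scroll.config(command=table.xview)
    y_scroll.config(command=table.yview)
    for col, text in zip(columns, headings):
        table.heading(col, text=text)
    table['show'] = 'headings'  # hide the implicit tree column
    table.pack()
    cur.execute(query)
    for row in cur.fetchall():
        table.insert('', tk.END, values=row)


# Usage, mirroring view_pay:
# show_table(root, cur, 'SELECT * FROM payment;',
#            ('trans_id', 'p_f_id', 'p_date', 'p_amount', 'p_method'),
#            ('Transaction Id', 'Farmer Id', 'Payment Date', 'Amount',
#             'Payment Method'))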
"step-3": "<mask token>\n\n\ndef First_page(root):\n global T1, T2, T3\n frame = Frame(root, height=500, width=800, bg='ivory')\n frame.pack()\n label = Label(root, text='WELCOME TO AGRI MARKET', font=(\n 'Times new roman', 25))\n label.place(x=200, y=50)\n button = Button(root, text='LogIn', font=('times new roman', 20),\n command=check_pass, bg='green')\n button.place(x=350, y=350)\n L1 = tk.Label(root, text='Username', font=('Arial Bold', 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width=30, bd=5)\n T1.place(x=280, y=200)\n L2 = tk.Label(root, text='Password', font=('Arial Bold', 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width=30, show='*', bd=5)\n T2.place(x=280, y=250)\n reg_button = Button(root, text='Register', font=('Arial Bold', 15), bg=\n 'blue', command=create_pass)\n reg_button.place(x=340, y=400)\n\n\ndef check_pass():\n global root, T1, T2, T3\n try:\n with open('password.txt', 'r') as f:\n lines = f.read()\n if T1.get() + '=' + T2.get() in lines and T1.get(\n ) != '' and T2.get() != '':\n entity_page()\n else:\n label = Label(root, text=\n 'Invalid username or password.Try again', font=(\n 'times new roman', 15))\n label.place(x=200, y=100)\n except:\n label = Label(root, text='Invalid username or password.Try again',\n font=('times new roman', 15))\n label.place(x=200, y=100)\n\n\ndef create_pass():\n global root, T1, T2, T3\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'ivory')\n label.place(x=0, y=0)\n L1 = tk.Label(root, text='Username', font=('Arial Bold', 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width=30, bd=5)\n T1.place(x=380, y=200)\n L2 = tk.Label(root, text='Password', font=('Arial Bold', 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width=30, show='*', bd=5)\n T2.place(x=380, y=250)\n L2 = tk.Label(root, text='Confirm Password', font=('Arial Bold', 15),\n bg='ivory')\n L2.place(x=150, y=300)\n T3 = tk.Entry(root, width=30, show='*', bd=5)\n T3.place(x=380, y=300)\n reg_button = Button(root, text='Done', font=('Arial Bold', 15), bg=\n 'blue', command=add_pass)\n reg_button.place(x=440, y=400)\n\n\ndef add_pass():\n global root, T1, T2, T3\n if T2.get() != T3.get():\n label = Label(root, text='Incorrect Password. 
Enter again', font=(\n 'times new roman', 20))\n label.place(x=100, y=100)\n else:\n try:\n with open('password.txt', 'r') as f:\n data = f.read()\n with open('password.txt', 'w') as f:\n f.write(data + '\\n')\n f.write(T1.get() + '=' + T2.get())\n entity_page()\n except:\n with open('password.txt', 'w') as f:\n f.write(T1.get() + '=' + T2.get())\n entity_page()\n\n\ndef entity_page():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'ivory')\n label.place(x=0, y=0)\n label = Label(root, text='WELCOME TO AGRI MARKET ', font=(\n 'Times new roman', 20), bg='blue')\n label.place(x=200, y=20)\n label = Label(root, text='Choose the Entity ', font=('Times new roman',\n 20), bg='white')\n label.place(x=250, y=100)\n Button = tk.Button(root, text='Farmers', font=('Arial', 15), command=farmer\n )\n Button.place(x=100, y=150 + 25)\n Button = tk.Button(root, text='Company', font=('Arial', 15), command=\n company)\n Button.place(x=300, y=150 + 25)\n Button = tk.Button(root, text='Fertilizer', font=('Arial', 15), command\n =fertilizer)\n Button.place(x=500, y=150 + 25)\n Button = tk.Button(root, text='Order', font=('Arial', 15), command=orders)\n Button.place(x=200, y=300 + 25)\n Button = tk.Button(root, text='Payment', font=('Arial', 15), command=\n payment)\n Button.place(x=400, y=300 + 25)\n Button = tk.Button(root, text='GET BOOKING HISTORY', font=('Arial', 15),\n command=history)\n Button.place(x=200, y=400 + 25)\n\n\ndef history():\n global root, cur, db\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n cur.execute('CALL getMonth(%s);', [datetime.today().strftime('%m')])\n data = cur.fetchall()\n label = Label(root, text='The Transaction History of this month', font=\n ('Arial', 15))\n label.place(x=200, y=20)\n button = Button(root, text='BACK', command=entity_page)\n button.place(x=20, y=20)\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('trans_id', 'p_f_id', 'p_date',\n 'p_amount', 'p_method'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('trans_id', text='Transaction Id')\n table.heading('p_f_id', text='Farmer Id')\n table.heading('p_date', text='Payment Date')\n table.heading('p_amount', text='Amount')\n table.heading('p_method', text='Payment Method')\n table['show'] = 'headings'\n table.pack()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n db.close()\n db = mysql.connector.connect(host='localhost', user='root', passwd=\n 'bhushi', database='farmer_app')\n cur = db.cursor()\n\n\ndef farmer():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Table', font=('Times new roman', 15),\n bg='white')\n label.place(x=350, y=10)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n entity_page)\n Button.place(x=10, y=50)\n Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n insert_farmer)\n Button.place(x=110, y=50)\n Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n delete_farmer)\n Button.place(x=210, y=50)\n Button = tk.Button(root, 
text='Update', font=('Arial', 15), command=\n update_farmer)\n Button.place(x=310, y=50)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_farmer)\n Button.place(x=410, y=50)\n view_farmer()\n\n\ndef view_farmer():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('f_id', 'f_name', 'f_phone',\n 'f_mail', 'f_locality', 'f_address'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('f_id', text='Farmer Id')\n table.heading('f_name', text='Farmer Name')\n table.heading('f_phone', text='Farmer Phone')\n table.heading('f_mail', text='Farmer Mail')\n table.heading('f_locality', text='Farmer Locality')\n table.heading('f_address', text='Farmer Address')\n table['show'] = 'headings'\n table.column('f_id', width=100)\n table.pack()\n cur.execute('SELECT * FROM farmer;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_farmer():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer_id', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=10)\n label = Label(root, text='Farmer_name', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Farmer_phone', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Farmer_mail', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Farmer_locality', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=210)\n label = Label(root, text='Farmer_address', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=270)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e6 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n e6.place(x=350, y=270)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=farmer)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_farmer_command)\n Button.place(x=400, y=400)\n\n\n<mask token>\n\n\ndef invalid(page):\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n if page == 'farmer':\n label = Label(root, text='Enter valid farmer_id', font=(\n 'Times new roman', 30), bg='white')\n label.place(x=170, y=200)\n button = Button(root, text='Re-enter', font=('Times new roman', 20),\n command=insert_farmer)\n button.place(x=300, y=400)\n elif page == 'company':\n label = Label(root, text='Enter valid company_id', font=(\n 'Times new roman', 30), bg='white')\n label.place(x=170, y=200)\n button = Button(root, text='Re-enter', font=('Times new roman', 20),\n command=insert_company)\n button.place(x=300, y=400)\n\n\n<mask token>\n\n\ndef delete_farmer_command():\n try:\n sql = 'DELETE FROM farmer WHERE f_id=%s;'\n cur.execute(sql, [e1.get()])\n 
db.commit()\n farmer()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\ndef update_farmer():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Id:', font=('Times new roman', 20), bg\n ='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=update)\n Button.place(x=300, y=400)\n\n\ndef update():\n try:\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n bg='tomato')\n label.place(x=0, y=0)\n sql = 'SELECT * FROM farmer WHERE f_id=%s;'\n vals = [e1.get()]\n cur.execute(sql, vals)\n label = Label(root, text='Farmer_id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Farmer_name', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Farmer_phone', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Farmer_mail', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Farmer_locality', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=210)\n label = Label(root, text='Farmer_address', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=270)\n e1 = Entry(root)\n e2 = Entry(root)\n e3 = Entry(root)\n e4 = Entry(root)\n e5 = Entry(root)\n e6 = Entry(root)\n data = cur.fetchall()\n arr = [e1, e2, e3, e4, e5, e6]\n count = 0\n for val in data[0]:\n arr[count].insert(0, val)\n count += 1\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n e6.place(x=350, y=270)\n label = Button(root, text='Modify', font=('Times new roman', 20),\n bg='blue', command=update_command)\n label.place(x=300, y=400)\n except:\n l = Label(root, text='Invalid Farmer_id', font=('times new roman', 15))\n l.place(x=100, y=300)\n update_farmer()\n\n\ndef update_command():\n try:\n sql = (\n 'UPDATE farmer SET f_name=%s,f_phone_no=%s,f_mail=%s,f_locality=%s,f_address=%s WHERE f_id=%s;'\n )\n vals = e2.get(), e3.get(), e4.get(), e5.get(), e6.get(), e1.get()\n cur.executemany(sql, [vals])\n db.commit()\n farmer()\n except:\n update_farmer()\n\n\ndef search_farmer():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Id:', font=('Times new roman', 20), bg\n ='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=farmer)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=search)\n Button.place(x=400, y=400)\n\n\ndef search():\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n try:\n sql = 'SELECT * FROM farmer WHERE f_id=%s;'\n val = [e1.get()]\n cur.execute(sql, val)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=farmer)\n Button.place(x=300, y=400)\n for val in cur:\n count = 0\n Y = 50\n names = ['farmer id: ', 'farmer name: ', 'farmer phone: ',\n 'farmer mail: ', 'farmer locality: ', 'farmer address: ']\n for i in val:\n label = Label(root, text=names[count] + str(i), font=(\n 'Times new roman', 20), 
bg='tomato')\n label.place(x=10, y=Y)\n Y += 50\n count += 1\n db.commit()\n except:\n l = Label(root, text='Invalid Farmer Id', font=('times new roman', 15))\n l.place(x=100, y=300)\n search_farmer()\n\n\ndef company():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company Table', font=('Times new roman', 15),\n bg='white')\n label.place(x=350, y=10)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n entity_page)\n Button.place(x=10, y=50)\n Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n insert_company)\n Button.place(x=110, y=50)\n Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n delete_company)\n Button.place(x=210, y=50)\n Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n update_company)\n Button.place(x=310, y=50)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_company)\n Button.place(x=410, y=50)\n view_company()\n\n\ndef view_company():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('c_id', 'c_name', 'c_address'),\n xscrollcommand=x_scroll.set, yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('c_id', text='Company Id')\n table.heading('c_name', text='Company Name')\n table.heading('c_address', text='Company Address')\n table['show'] = 'headings'\n table.column('c_id', width=100)\n table.pack()\n cur.execute('SELECT * FROM company;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\ndef insert_company():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company_id', font=('Times new roman', 20), bg\n ='white')\n label.place(x=50, y=10)\n label = Label(root, text='Company_name', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Company_address', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=110)\n e3.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_company_command)\n Button.place(x=400, y=400)\n\n\ndef insert_company_command():\n try:\n if len(e1.get()) > 3:\n invalid('company')\n else:\n sql = 'INSERT INTO company values(%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get()\n cur.executemany(sql, [vals])\n db.commit()\n company()\n except:\n insert_company()\n\n\ndef delete_company():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company Id:', font=('Times new roman', 20),\n bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n Button.place(x=200, y=400)\n Button = 
tk.Button(root, text='Commit', font=('Arial', 15), command=\n        delete_company_command)\n    Button.place(x=400, y=400)\n\n\ndef delete_company_command():\n    try:\n        sql = 'DELETE FROM company WHERE c_id=%s;'\n        cur.execute(sql, [int(e1.get())])\n        db.commit()\n        company()\n    except:\n        l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n        l.place(x=100, y=300)\n\n\n<mask token>\n\n\ndef update_c():\n    try:\n        global e1, e2, e3, e4, e5, e6\n        label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n            bg='tomato')\n        label.place(x=0, y=0)\n        sql = 'SELECT * FROM company WHERE c_id=%s;'\n        vals = [e1.get()]\n        cur.execute(sql, vals)\n        label = Label(root, text='Company_id', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=10)\n        label = Label(root, text='Company_name', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=110)\n        label = Label(root, text='Company_address', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=210)\n        e1 = Entry(root)\n        e2 = Entry(root)\n        e3 = Entry(root)\n        data = cur.fetchall()\n        arr = [e1, e2, e3]\n        count = 0\n        for val in data[0]:\n            arr[count].insert(0, val)\n            count += 1\n        e1.place(x=350, y=10)\n        e2.place(x=350, y=110)\n        e3.place(x=350, y=210)\n        label = Button(root, text='Modify', font=('Times new roman', 20),\n            bg='blue', command=update_command_c)\n        label.place(x=300, y=400)\n    except:\n        l = Label(root, text='Invalid Company_id', font=('times new roman', 15))\n        l.place(x=100, y=300)\n        update_company()\n\n\n<mask token>\n\n\ndef search_company():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Company Id:', font=('Times new roman', 20),\n        bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_c)\n    Button.place(x=400, y=400)\n\n\ndef search_c():\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    try:\n        sql = 'SELECT * FROM company WHERE c_id=%s;'\n        val = [e1.get()]\n        cur.execute(sql, val)\n        Button = tk.Button(root, text='OK', font=('Arial', 15), command=company\n            )\n        Button.place(x=300, y=400)\n        for val in cur:\n            count = 0\n            Y = 50\n            names = ['company id: ', 'company name: ', 'company address: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid Company Id', font=('times new roman', 15)\n            )\n        l.place(x=100, y=300)\n        search_company()\n\n\ndef fertilizer():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer Table', font=('Times new roman',\n        15), bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_fer)\n    
text='Search', font=('Arial', 15), command=\n        search_fer)\n    Button.place(x=410, y=50)\n    view_fer()\n\n\ndef view_fer():\n    frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n    frame.place(x=10, y=100, width=750, height=400)\n    x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n    y_scroll = Scrollbar(frame, orient=VERTICAL)\n    table = ttk.Treeview(frame, columns=('fe_formula', 'fe_name',\n        'fe_content', 'fe_price', 'company_id'), xscrollcommand=x_scroll.\n        set, yscrollcommand=y_scroll.set)\n    x_scroll.pack(side=BOTTOM, fill=X)\n    y_scroll.pack(side=RIGHT, fill=Y)\n    x_scroll.config(command=table.xview)\n    y_scroll.config(command=table.yview)\n    table.heading('fe_formula', text='Fertilizer Formula')\n    table.heading('fe_name', text='Fertilizer name')\n    table.heading('fe_content', text='Fertilizer content')\n    table.heading('fe_price', text='Fertilizer price')\n    table.heading('company_id', text='Company_id')\n    table['show'] = 'headings'\n    table.pack()\n    cur.execute('SELECT * FROM fertilizer;')\n    data = cur.fetchall()\n    db.commit()\n    if len(data) != 0:\n        for row in data:\n            table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_fer_command():\n    try:\n        sql = 'INSERT INTO fertilizer values(%s,%s,%s,%s,%s);'\n        vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n        cur.executemany(sql, [vals])\n        db.commit()\n        fertilizer()\n    except:\n        insert_fer()\n\n\ndef delete_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        fertilizer)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n        delete_fer_command)\n    Button.place(x=400, y=400)\n\n\ndef delete_fer_command():\n    try:\n        sql = 'DELETE FROM fertilizer WHERE fe_formula=%s;'\n        cur.execute(sql, [e1.get()])\n        db.commit()\n        fertilizer()\n    except:\n        l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n        l.place(x=100, y=300)\n\n\ndef update_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='OK', font=('Arial', 15), command=update_fe)\n    Button.place(x=300, y=400)\n\n\ndef update_fe():\n    try:\n        global e1, e2, e3, e4, e5\n        label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n            bg='tomato')\n        label.place(x=0, y=0)\n        sql = 'SELECT * FROM fertilizer WHERE fe_formula=%s;'\n        vals = [e1.get()]\n        cur.execute(sql, vals)\n        label = Label(root, text='Fertilizer formula', font=(\n            'Times new roman', 20), bg='white')\n        label.place(x=50, y=10)\n        label = Label(root, text='Fertilizer name', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=60)\n        label = Label(root, text='Fertilizer content', font=(\n            'Times new roman', 20), bg='white')\n        label.place(x=50, y=110)\n        label = Label(root, text='Fertilizer price', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=160)\n        label = Label(root, text='company_id', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=210)\n        e1 = Entry(root)\n        e2 = Entry(root)\n        e3 = Entry(root)\n        e4 = Entry(root)\n        e5 = Entry(root)\n        
data = cur.fetchall()\n        arr = [e1, e2, e3, e4, e5]\n        count = 0\n        for val in data[0]:\n            arr[count].insert(0, val)\n            count += 1\n        e1.place(x=350, y=10)\n        e2.place(x=350, y=60)\n        e3.place(x=350, y=110)\n        e4.place(x=350, y=160)\n        e5.place(x=350, y=210)\n        label = Button(root, text='Modify', font=('Times new roman', 20),\n            bg='blue', command=update_command_fe)\n        label.place(x=300, y=400)\n    except:\n        l = Label(root, text='Invalid Fertilizer formula', font=(\n            'times new roman', 15))\n        l.place(x=100, y=300)\n        update_fer()\n\n\n<mask token>\n\n\ndef search_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        fertilizer)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_fe)\n    Button.place(x=400, y=400)\n\n\ndef search_fe():\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    try:\n        sql = 'SELECT * FROM fertilizer WHERE fe_formula=%s;'\n        val = [e1.get()]\n        cur.execute(sql, val)\n        Button = tk.Button(root, text='OK', font=('Arial', 15), command=\n            fertilizer)\n        Button.place(x=300, y=400)\n        for val in cur:\n            count = 0\n            Y = 50\n            names = ['fertilizer formula: ', 'fertilizer name: ',\n                'fertilizer content: ', 'fertilizer price: ', 'company_id: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid Fertilizer formula', font=(\n            'times new roman', 15))\n        l.place(x=100, y=300)\n        search_fer()\n\n\ndef orders():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Orders Table', font=('Times new roman', 15),\n        bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_ord)\n    Button.place(x=110, y=50)\n    Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n        delete_ord)\n    Button.place(x=210, y=50)\n    Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n        update_ord)\n    Button.place(x=310, y=50)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_ord)\n    Button.place(x=410, y=50)\n    view_ord()\n\n\ndef view_ord():\n    frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n    frame.place(x=10, y=100, width=750, height=400)\n    x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n    y_scroll = Scrollbar(frame, orient=VERTICAL)\n    table = ttk.Treeview(frame, columns=('or_id', 'or_date', 'or_fid',\n        'or_formula', 'or_to'), xscrollcommand=x_scroll.set, yscrollcommand\n        =y_scroll.set)\n    x_scroll.pack(side=BOTTOM, fill=X)\n    y_scroll.pack(side=RIGHT, fill=Y)\n    x_scroll.config(command=table.xview)\n    y_scroll.config(command=table.yview)\n    table.heading('or_id', text='Order Id')\n    table.heading('or_date', text='Order Date')\n    table.heading('or_fid', text='Ordered Farmer Id')\n    table.heading('or_formula', text='Order (item)formula')\n    table.heading('or_to', text='Order to')\n    table['show'] = 'headings'\n    table.pack()\n    
cur.execute('SELECT * FROM orders;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_ord():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=10)\n label = Label(root, text='Order date', font=('Times new roman', 20), bg\n ='white')\n label.place(x=50, y=60)\n label = Label(root, text='Order FID', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=110)\n label = Label(root, text='Order formula', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Order to', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e2.insert(0, datetime.now())\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_ord_command)\n Button.place(x=400, y=400)\n\n\ndef insert_ord_command():\n try:\n sql = 'INSERT INTO orders values(%s,%s,%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n cur.executemany(sql, [vals])\n db.commit()\n orders()\n except:\n insert_ord()\n\n\ndef delete_ord():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n 'tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n delete_ord_command)\n Button.place(x=400, y=400)\n\n\ndef delete_ord_command():\n try:\n sql = 'DELETE FROM orders WHERE or_id=%s;'\n cur.execute(sql, [e1.get()])\n db.commit()\n orders()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\ndef update_ord():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n 'tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=update_or)\n Button.place(x=300, y=400)\n\n\ndef update_or():\n try:\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n bg='tomato')\n label.place(x=0, y=0)\n sql = 'SELECT * FROM orders WHERE or_id=%s;'\n vals = [e1.get()]\n cur.execute(sql, vals)\n label = Label(root, text='Order Id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Order Date', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Order f_id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Order formula', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, 
y=160)\n        label = Label(root, text='Order to', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=210)\n        e1 = Entry(root)\n        e2 = Entry(root)\n        e3 = Entry(root)\n        e4 = Entry(root)\n        e5 = Entry(root)\n        data = cur.fetchall()\n        arr = [e1, e2, e3, e4, e5]\n        count = 0\n        for val in data[0]:\n            arr[count].insert(0, val)\n            count += 1\n        e1.place(x=350, y=10)\n        e2.place(x=350, y=60)\n        e3.place(x=350, y=110)\n        e4.place(x=350, y=160)\n        e5.place(x=350, y=210)\n        label = Button(root, text='Modify', font=('Times new roman', 20),\n            bg='blue', command=update_command_ord)\n        label.place(x=300, y=400)\n    except:\n        l = Label(root, text='Invalid Order_id', font=('times new roman', 15))\n        l.place(x=100, y=300)\n        update_ord()\n\n\ndef update_command_ord():\n    sql = (\n        'UPDATE orders SET or_date=%s,or_fid=%s,or_formula=%s,or_to=%s WHERE or_id=%s;'\n        )\n    vals = e2.get(), e3.get(), e4.get(), e5.get(), e1.get()\n    cur.executemany(sql, [vals])\n    db.commit()\n    orders()\n\n\ndef search_ord():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n        'tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_or)\n    Button.place(x=400, y=400)\n\n\ndef search_or():\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    try:\n        sql = 'SELECT * FROM orders WHERE or_id=%s;'\n        val = [e1.get()]\n        cur.execute(sql, val)\n        Button = tk.Button(root, text='OK', font=('Arial', 15), command=orders)\n        Button.place(x=300, y=400)\n        for val in cur:\n            count = 0\n            Y = 50\n            names = ['Order id: ', 'Order date: ', 'Order fid: ',\n                'Order formula: ', 'Order to: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid order id', font=('times new roman', 15))\n        l.place(x=100, y=300)\n        search_ord()\n\n\ndef payment():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Payment Table', font=('Times new roman', 15),\n        bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_pay)\n    Button.place(x=110, y=50)\n    Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n        delete_pay)\n    Button.place(x=210, y=50)\n    Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n        update_pay)\n    Button.place(x=310, y=50)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_pay)\n    Button.place(x=410, y=50)\n    view_pay()\n\n\ndef view_pay():\n    frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n    frame.place(x=10, y=100, width=750, height=400)\n    x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n    y_scroll = Scrollbar(frame, orient=VERTICAL)\n    table = ttk.Treeview(frame, columns=('trans_id', 'p_f_id', 'p_date',\n        'p_amount', 'p_method'), xscrollcommand=x_scroll.set,\n        yscrollcommand=y_scroll.set)\n    x_scroll.pack(side=BOTTOM, fill=X)\n    y_scroll.pack(side=RIGHT, fill=Y)\n    
x_scroll.config(command=table.xview)\n    y_scroll.config(command=table.yview)\n    table.heading('trans_id', text='Transaction Id')\n    table.heading('p_f_id', text='Farmer Id')\n    table.heading('p_date', text='Payment Date')\n    table.heading('p_amount', text='Amount')\n    table.heading('p_method', text='Payment Method')\n    table['show'] = 'headings'\n    table.pack()\n    cur.execute('SELECT * FROM payment;')\n    data = cur.fetchall()\n    db.commit()\n    if len(data) != 0:\n        for row in data:\n            table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_pay():\n    global e1, e2, e3, e4, e5, e6\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Transaction Id', font=('Times new roman', 20),\n        bg='white')\n    label.place(x=50, y=10)\n    label = Label(root, text='Transaction farmer id', font=(\n        'Times new roman', 20), bg='white')\n    label.place(x=50, y=60)\n    label = Label(root, text='Transaction date', font=('Times new roman',\n        20), bg='white')\n    label.place(x=50, y=110)\n    label = Label(root, text='Transaction amount', font=('Times new roman',\n        20), bg='white')\n    label.place(x=50, y=160)\n    label = Label(root, text='Transaction method', font=('Times new roman',\n        20), bg='white')\n    label.place(x=50, y=210)\n    e1 = Entry(root, width=50)\n    e2 = Entry(root, width=50)\n    e3 = Entry(root, width=50)\n    e4 = Entry(root, width=50)\n    e1.place(x=350, y=10)\n    e2.place(x=350, y=60)\n    e3.place(x=350, y=110)\n    e3.insert(0, datetime.now())\n    e4.place(x=350, y=160)\n    e5 = StringVar(root)\n    e5.set('Debit Card')\n    w = OptionMenu(root, e5, 'Debit Card', 'Credit Card', 'UPI', 'Cheque', 'Cash')\n    w.place(x=350, y=210)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n        insert_pay_command)\n    Button.place(x=400, y=400)\n\n\ndef insert_pay_command():\n    try:\n        sql = 'INSERT INTO payment values(%s,%s,%s,%s,%s);'\n        vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n        cur.executemany(sql, [vals])\n        db.commit()\n        payment()\n    except:\n        insert_pay()\n\n\ndef delete_pay():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n        ), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n        delete_pay_command)\n    Button.place(x=400, y=400)\n\n\ndef delete_pay_command():\n    try:\n        sql = 'DELETE FROM payment WHERE trans_id=%s;'\n        cur.execute(sql, [e1.get()])\n        db.commit()\n        payment()\n    except:\n        l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n        l.place(x=100, y=300)\n\n\n<mask token>\n\n\ndef search_pay():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n        ), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_pa)\n    Button.place(x=400, y=400)\n\n\n<mask token>\n",
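Editor's note on the database pattern in both steps: every INSERT and UPDATE above goes through cur.executemany(sql, [vals]) with a one-element list. That works, but mysql-connector-python's cursor.execute(sql, params) is the usual single-statement form, and either way the %s placeholders keep user input out of the SQL text. A minimal sketch of the same pattern, assuming the farmer table from the dump; the credentials and sample row here are placeholders, not values taken from the original code.

import mysql.connector

# Connect with placeholder credentials (the app hard-codes its own).
db = mysql.connector.connect(host='localhost', user='root',
                             passwd='your_password', database='farmer_app')
cur = db.cursor()

sql = 'INSERT INTO farmer VALUES (%s, %s, %s, %s, %s, %s);'
vals = ('101', 'A Farmer', '9876543210', 'a@example.com', 'North', 'Main Road')
cur.execute(sql, vals)   # single-row form of cur.executemany(sql, [vals])
db.commit()              # INSERT/UPDATE/DELETE take effect only after commit

Because the driver escapes each %s value itself, a malicious id like "1; DROP TABLE farmer" stays inert data instead of executable SQL, which is why the code's consistent use of placeholders matters more than the execute/executemany choice.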
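Each view_* function above repeats the same Treeview-plus-scrollbars boilerplate: pack the scrollbars, cross-wire xscrollcommand/yscrollcommand with xview/yview, set the headings, hide the implicit first column, then insert cur.fetchall() rows. A condensed sketch of that pattern; make_table is a hypothetical helper, not part of the original code.

import tkinter as tk
from tkinter import ttk
from tkinter import (Frame, Scrollbar, BOTTOM, RIGHT, X, Y,
                     HORIZONTAL, VERTICAL, RIDGE, END)

def make_table(root, columns, headings, rows):
    frame = Frame(root, bd=5, relief=RIDGE)
    frame.place(x=10, y=100, width=750, height=400)
    x_scroll = Scrollbar(frame, orient=HORIZONTAL)
    y_scroll = Scrollbar(frame, orient=VERTICAL)
    table = ttk.Treeview(frame, columns=columns,
                         xscrollcommand=x_scroll.set,
                         yscrollcommand=y_scroll.set)
    x_scroll.pack(side=BOTTOM, fill=X)
    y_scroll.pack(side=RIGHT, fill=Y)
    x_scroll.config(command=table.xview)   # scrollbar drives the view...
    y_scroll.config(command=table.yview)   # ...and the view moves the slider
    for col, text in zip(columns, headings):
        table.heading(col, text=text)
    table['show'] = 'headings'             # hide the implicit tree column
    table.pack()
    for row in rows:                       # rows: e.g. cur.fetchall()
        table.insert('', END, values=row)
    return table

With such a helper, view_farmer, view_company, view_fer, view_ord and view_pay would each shrink to one query plus one make_table call, differing only in their column lists.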
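A note on error handling: every *_command function wraps its query in a bare "except:" that silently re-opens the form, which also hides programming errors such as the stale e6 reference fixed above. A hedged sketch of the same validation idea with a narrower handler; insert_farmer_row is a hypothetical helper, not part of the original code.

import mysql.connector

def insert_farmer_row(cur, db, vals):
    # Same id-length rule that invalid('farmer') enforces in the GUI.
    if len(vals[0]) > 3:
        raise ValueError('farmer id must be at most 3 characters')
    try:
        cur.execute('INSERT INTO farmer VALUES (%s,%s,%s,%s,%s,%s);', vals)
        db.commit()
    except mysql.connector.Error as exc:   # duplicate key, bad type, ...
        db.rollback()                      # leave the connection clean
        raise ValueError(f'insert rejected: {exc}') from exc

Catching mysql.connector.Error keeps genuine bugs (a NameError, a typo'd column) loud while still letting the GUI turn database rejections into an "Invalid Entry" label.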
"step-4": "<mask token>\n\n\ndef First_page(root):\n global T1, T2, T3\n frame = Frame(root, height=500, width=800, bg='ivory')\n frame.pack()\n label = Label(root, text='WELCOME TO AGRI MARKET', font=(\n 'Times new roman', 25))\n label.place(x=200, y=50)\n button = Button(root, text='LogIn', font=('times new roman', 20),\n command=check_pass, bg='green')\n button.place(x=350, y=350)\n L1 = tk.Label(root, text='Username', font=('Arial Bold', 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width=30, bd=5)\n T1.place(x=280, y=200)\n L2 = tk.Label(root, text='Password', font=('Arial Bold', 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width=30, show='*', bd=5)\n T2.place(x=280, y=250)\n reg_button = Button(root, text='Register', font=('Arial Bold', 15), bg=\n 'blue', command=create_pass)\n reg_button.place(x=340, y=400)\n\n\ndef check_pass():\n global root, T1, T2, T3\n try:\n with open('password.txt', 'r') as f:\n lines = f.read()\n if T1.get() + '=' + T2.get() in lines and T1.get(\n ) != '' and T2.get() != '':\n entity_page()\n else:\n label = Label(root, text=\n 'Invalid username or password.Try again', font=(\n 'times new roman', 15))\n label.place(x=200, y=100)\n except:\n label = Label(root, text='Invalid username or password.Try again',\n font=('times new roman', 15))\n label.place(x=200, y=100)\n\n\ndef create_pass():\n global root, T1, T2, T3\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'ivory')\n label.place(x=0, y=0)\n L1 = tk.Label(root, text='Username', font=('Arial Bold', 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width=30, bd=5)\n T1.place(x=380, y=200)\n L2 = tk.Label(root, text='Password', font=('Arial Bold', 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width=30, show='*', bd=5)\n T2.place(x=380, y=250)\n L2 = tk.Label(root, text='Confirm Password', font=('Arial Bold', 15),\n bg='ivory')\n L2.place(x=150, y=300)\n T3 = tk.Entry(root, width=30, show='*', bd=5)\n T3.place(x=380, y=300)\n reg_button = Button(root, text='Done', font=('Arial Bold', 15), bg=\n 'blue', command=add_pass)\n reg_button.place(x=440, y=400)\n\n\ndef add_pass():\n global root, T1, T2, T3\n if T2.get() != T3.get():\n label = Label(root, text='Incorrect Password. 
Enter again', font=(\n 'times new roman', 20))\n label.place(x=100, y=100)\n else:\n try:\n with open('password.txt', 'r') as f:\n data = f.read()\n with open('password.txt', 'w') as f:\n f.write(data + '\\n')\n f.write(T1.get() + '=' + T2.get())\n entity_page()\n except:\n with open('password.txt', 'w') as f:\n f.write(T1.get() + '=' + T2.get())\n entity_page()\n\n\ndef entity_page():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'ivory')\n label.place(x=0, y=0)\n label = Label(root, text='WELCOME TO AGRI MARKET ', font=(\n 'Times new roman', 20), bg='blue')\n label.place(x=200, y=20)\n label = Label(root, text='Choose the Entity ', font=('Times new roman',\n 20), bg='white')\n label.place(x=250, y=100)\n Button = tk.Button(root, text='Farmers', font=('Arial', 15), command=farmer\n )\n Button.place(x=100, y=150 + 25)\n Button = tk.Button(root, text='Company', font=('Arial', 15), command=\n company)\n Button.place(x=300, y=150 + 25)\n Button = tk.Button(root, text='Fertilizer', font=('Arial', 15), command\n =fertilizer)\n Button.place(x=500, y=150 + 25)\n Button = tk.Button(root, text='Order', font=('Arial', 15), command=orders)\n Button.place(x=200, y=300 + 25)\n Button = tk.Button(root, text='Payment', font=('Arial', 15), command=\n payment)\n Button.place(x=400, y=300 + 25)\n Button = tk.Button(root, text='GET BOOKING HISTORY', font=('Arial', 15),\n command=history)\n Button.place(x=200, y=400 + 25)\n\n\ndef history():\n global root, cur, db\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n cur.execute('CALL getMonth(%s);', [datetime.today().strftime('%m')])\n data = cur.fetchall()\n label = Label(root, text='The Transaction History of this month', font=\n ('Arial', 15))\n label.place(x=200, y=20)\n button = Button(root, text='BACK', command=entity_page)\n button.place(x=20, y=20)\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('trans_id', 'p_f_id', 'p_date',\n 'p_amount', 'p_method'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('trans_id', text='Transaction Id')\n table.heading('p_f_id', text='Farmer Id')\n table.heading('p_date', text='Payment Date')\n table.heading('p_amount', text='Amount')\n table.heading('p_method', text='Payment Method')\n table['show'] = 'headings'\n table.pack()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n db.close()\n db = mysql.connector.connect(host='localhost', user='root', passwd=\n 'bhushi', database='farmer_app')\n cur = db.cursor()\n\n\ndef farmer():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Table', font=('Times new roman', 15),\n bg='white')\n label.place(x=350, y=10)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n entity_page)\n Button.place(x=10, y=50)\n Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n insert_farmer)\n Button.place(x=110, y=50)\n Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n delete_farmer)\n Button.place(x=210, y=50)\n Button = tk.Button(root, 
text='Update', font=('Arial', 15), command=\n update_farmer)\n Button.place(x=310, y=50)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_farmer)\n Button.place(x=410, y=50)\n view_farmer()\n\n\ndef view_farmer():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('f_id', 'f_name', 'f_phone',\n 'f_mail', 'f_locality', 'f_address'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('f_id', text='Farmer Id')\n table.heading('f_name', text='Farmer Name')\n table.heading('f_phone', text='Farmer Phone')\n table.heading('f_mail', text='Farmer Mail')\n table.heading('f_locality', text='Farmer Locality')\n table.heading('f_address', text='Farmer Address')\n table['show'] = 'headings'\n table.column('f_id', width=100)\n table.pack()\n cur.execute('SELECT * FROM farmer;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_farmer():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer_id', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=10)\n label = Label(root, text='Farmer_name', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Farmer_phone', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Farmer_mail', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Farmer_locality', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=210)\n label = Label(root, text='Farmer_address', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=270)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e6 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n e6.place(x=350, y=270)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=farmer)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_farmer_command)\n Button.place(x=400, y=400)\n\n\ndef insert_farmer_command():\n global root\n try:\n sql = 'INSERT INTO farmer values(%s,%s,%s,%s,%s,%s);'\n if len(e1.get()) > 3:\n invalid('farmer')\n else:\n vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get(), e6.get()\n cur.executemany(sql, [vals])\n db.commit()\n farmer()\n except:\n insert_farmer()\n\n\ndef invalid(page):\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n if page == 'farmer':\n label = Label(root, text='Enter valid farmer_id', font=(\n 'Times new roman', 30), bg='white')\n label.place(x=170, y=200)\n button = Button(root, text='Re-enter', font=('Times new roman', 20),\n command=insert_farmer)\n button.place(x=300, y=400)\n elif page == 'company':\n label = Label(root, text='Enter valid company_id', font=(\n 'Times new roman', 30), bg='white')\n 
label.place(x=170, y=200)\n button = Button(root, text='Re-enter', font=('Times new roman', 20),\n command=insert_company)\n button.place(x=300, y=400)\n\n\n<mask token>\n\n\ndef delete_farmer_command():\n try:\n sql = 'DELETE FROM farmer WHERE f_id=%s;'\n cur.execute(sql, [e1.get()])\n db.commit()\n farmer()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\ndef update_farmer():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Id:', font=('Times new roman', 20), bg\n ='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=update)\n Button.place(x=300, y=400)\n\n\ndef update():\n try:\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n bg='tomato')\n label.place(x=0, y=0)\n sql = 'SELECT * FROM farmer WHERE f_id=%s;'\n vals = [e1.get()]\n cur.execute(sql, vals)\n label = Label(root, text='Farmer_id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Farmer_name', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Farmer_phone', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Farmer_mail', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Farmer_locality', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=210)\n label = Label(root, text='Farmer_address', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=270)\n e1 = Entry(root)\n e2 = Entry(root)\n e3 = Entry(root)\n e4 = Entry(root)\n e5 = Entry(root)\n e6 = Entry(root)\n data = cur.fetchall()\n arr = [e1, e2, e3, e4, e5, e6]\n count = 0\n for val in data[0]:\n arr[count].insert(0, val)\n count += 1\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n e6.place(x=350, y=270)\n label = Button(root, text='Modify', font=('Times new roman', 20),\n bg='blue', command=update_command)\n label.place(x=300, y=400)\n except:\n l = Label(root, text='Invalid Farmer_id', font=('times new roman', 15))\n l.place(x=100, y=300)\n update_farmer()\n\n\ndef update_command():\n try:\n sql = (\n 'UPDATE farmer SET f_name=%s,f_phone_no=%s,f_mail=%s,f_locality=%s,f_address=%s WHERE f_id=%s;'\n )\n vals = e2.get(), e3.get(), e4.get(), e5.get(), e6.get(), e1.get()\n cur.executemany(sql, [vals])\n db.commit()\n farmer()\n except:\n update_farmer()\n\n\ndef search_farmer():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Farmer Id:', font=('Times new roman', 20), bg\n ='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=farmer)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=search)\n Button.place(x=400, y=400)\n\n\ndef search():\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n try:\n sql = 'SELECT * FROM farmer WHERE f_id=%s;'\n val = [e1.get()]\n cur.execute(sql, val)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=farmer)\n 
Button.place(x=300, y=400)\n for val in cur:\n count = 0\n Y = 50\n names = ['farmer id: ', 'farmer name: ', 'farmer phone: ',\n 'farmer mail: ', 'farmer locality: ', 'farmer address: ']\n for i in val:\n label = Label(root, text=names[count] + str(i), font=(\n 'Times new roman', 20), bg='tomato')\n label.place(x=10, y=Y)\n Y += 50\n count += 1\n db.commit()\n except:\n l = Label(root, text='Invalid Farmer Id', font=('times new roman', 15))\n l.place(x=100, y=300)\n search_farmer()\n\n\ndef company():\n global root\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company Table', font=('Times new roman', 15),\n bg='white')\n label.place(x=350, y=10)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n entity_page)\n Button.place(x=10, y=50)\n Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n insert_company)\n Button.place(x=110, y=50)\n Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n delete_company)\n Button.place(x=210, y=50)\n Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n update_company)\n Button.place(x=310, y=50)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_company)\n Button.place(x=410, y=50)\n view_company()\n\n\ndef view_company():\n frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n frame.place(x=10, y=100, width=750, height=400)\n x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('c_id', 'c_name', 'c_address'),\n xscrollcommand=x_scroll.set, yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('c_id', text='Company Id')\n table.heading('c_name', text='Company Name')\n table.heading('c_address', text='Company Address')\n table['show'] = 'headings'\n table.column('c_id', width=100)\n table.pack()\n cur.execute('SELECT * FROM company;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\ndef insert_company():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Company_id', font=('Times new roman', 20), bg\n ='white')\n label.place(x=50, y=10)\n label = Label(root, text='Company_name', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Company_address', font=('Times new roman', 20\n ), bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=110)\n e3.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_company_command)\n Button.place(x=400, y=400)\n\n\ndef insert_company_command():\n try:\n if len(e1.get()) > 3:\n invalid('company')\n else:\n sql = 'INSERT INTO company values(%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get()\n cur.executemany(sql, [vals])\n db.commit()\n company()\n except:\n insert_company()\n\n\ndef delete_company():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = 
Label(root, text='Company Id:', font=('Times new roman', 20),\n        bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n        delete_company_command)\n    Button.place(x=400, y=400)\n\n\ndef delete_company_command():\n    try:\n        sql = 'DELETE FROM company WHERE c_id=%s;'\n        cur.execute(sql, [int(e1.get())])\n        db.commit()\n        company()\n    except:\n        l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n        l.place(x=100, y=300)\n\n\n<mask token>\n\n\ndef update_c():\n    try:\n        global e1, e2, e3, e4, e5, e6\n        label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n            bg='tomato')\n        label.place(x=0, y=0)\n        sql = 'SELECT * FROM company WHERE c_id=%s;'\n        vals = [e1.get()]\n        cur.execute(sql, vals)\n        label = Label(root, text='Company_id', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=10)\n        label = Label(root, text='Company_name', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=110)\n        label = Label(root, text='Company_address', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=210)\n        e1 = Entry(root)\n        e2 = Entry(root)\n        e3 = Entry(root)\n        data = cur.fetchall()\n        arr = [e1, e2, e3]\n        count = 0\n        for val in data[0]:\n            arr[count].insert(0, val)\n            count += 1\n        e1.place(x=350, y=10)\n        e2.place(x=350, y=110)\n        e3.place(x=350, y=210)\n        label = Button(root, text='Modify', font=('Times new roman', 20),\n            bg='blue', command=update_command_c)\n        label.place(x=300, y=400)\n    except:\n        l = Label(root, text='Invalid Company_id', font=('times new roman', 15))\n        l.place(x=100, y=300)\n        update_company()\n\n\n<mask token>\n\n\ndef search_company():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Company Id:', font=('Times new roman', 20),\n        bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=company)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_c)\n    Button.place(x=400, y=400)\n\n\ndef search_c():\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    try:\n        sql = 'SELECT * FROM company WHERE c_id=%s;'\n        val = [e1.get()]\n        cur.execute(sql, val)\n        Button = tk.Button(root, text='OK', font=('Arial', 15), command=company\n            )\n        Button.place(x=300, y=400)\n        for val in cur:\n            count = 0\n            Y = 50\n            names = ['company id: ', 'company name: ', 'company address: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid Company Id', font=('times new roman', 15)\n            )\n        l.place(x=100, y=300)\n        search_company()\n\n\ndef fertilizer():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer Table', font=('Times new roman',\n        15), bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_fer)\n    
Button.place(x=110, y=50)\n    Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n        delete_fer)\n    Button.place(x=210, y=50)\n    Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n        update_fer)\n    Button.place(x=310, y=50)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_fer)\n    Button.place(x=410, y=50)\n    view_fer()\n\n\ndef view_fer():\n    frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n    frame.place(x=10, y=100, width=750, height=400)\n    x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n    y_scroll = Scrollbar(frame, orient=VERTICAL)\n    table = ttk.Treeview(frame, columns=('fe_formula', 'fe_name',\n        'fe_content', 'fe_price', 'company_id'), xscrollcommand=x_scroll.\n        set, yscrollcommand=y_scroll.set)\n    x_scroll.pack(side=BOTTOM, fill=X)\n    y_scroll.pack(side=RIGHT, fill=Y)\n    x_scroll.config(command=table.xview)\n    y_scroll.config(command=table.yview)\n    table.heading('fe_formula', text='Fertilizer Formula')\n    table.heading('fe_name', text='Fertilizer name')\n    table.heading('fe_content', text='Fertilizer content')\n    table.heading('fe_price', text='Fertilizer price')\n    table.heading('company_id', text='Company_id')\n    table['show'] = 'headings'\n    table.pack()\n    cur.execute('SELECT * FROM fertilizer;')\n    data = cur.fetchall()\n    db.commit()\n    if len(data) != 0:\n        for row in data:\n            table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_fer_command():\n    try:\n        sql = 'INSERT INTO fertilizer values(%s,%s,%s,%s,%s);'\n        vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n        cur.executemany(sql, [vals])\n        db.commit()\n        fertilizer()\n    except:\n        insert_fer()\n\n\ndef delete_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        fertilizer)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n        delete_fer_command)\n    Button.place(x=400, y=400)\n\n\ndef delete_fer_command():\n    try:\n        sql = 'DELETE FROM fertilizer WHERE fe_formula=%s;'\n        cur.execute(sql, [e1.get()])\n        db.commit()\n        fertilizer()\n    except:\n        l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n        l.place(x=100, y=300)\n\n\ndef update_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='OK', font=('Arial', 15), command=update_fe)\n    Button.place(x=300, y=400)\n\n\ndef update_fe():\n    try:\n        global e1, e2, e3, e4, e5\n        label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n            bg='tomato')\n        label.place(x=0, y=0)\n        sql = 'SELECT * FROM fertilizer WHERE fe_formula=%s;'\n        vals = [e1.get()]\n        cur.execute(sql, vals)\n        label = Label(root, text='Fertilizer formula', font=(\n            'Times new roman', 20), bg='white')\n        label.place(x=50, y=10)\n        label = Label(root, text='Fertilizer name', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=60)\n        label = Label(root, text='Fertilizer content', font=(\n            'Times new roman', 20), bg='white')\n        label.place(x=50, y=110)\n        label = Label(root, text='Fertilizer price', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=160)\n        label = Label(root, text='company_id', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=210)\n        e1 = Entry(root)\n        e2 = Entry(root)\n        e3 = Entry(root)\n        e4 = Entry(root)\n        e5 = Entry(root)\n        data = cur.fetchall()\n        arr = [e1, e2, e3, e4, e5]\n        count = 0\n        for val in data[0]:\n            arr[count].insert(0, val)\n            count += 1\n        e1.place(x=350, y=10)\n        e2.place(x=350, y=60)\n        e3.place(x=350, y=110)\n        e4.place(x=350, y=160)\n        e5.place(x=350, y=210)\n        label = Button(root, text='Modify', font=('Times new roman', 20),\n            bg='blue', command=update_command_fe)\n        label.place(x=300, y=400)\n    except:\n        l = Label(root, text='Invalid Fertilizer formula', font=(\n            'times new roman', 15))\n        l.place(x=100, y=300)\n        update_fer()\n\n\n<mask token>\n\n\ndef search_fer():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Fertilizer formula:', font=('Times new roman',\n        20), bg='tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        fertilizer)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_fe)\n    Button.place(x=400, y=400)\n\n\ndef search_fe():\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    try:\n        sql = 'SELECT * FROM fertilizer WHERE fe_formula=%s;'\n        val = [e1.get()]\n        cur.execute(sql, val)\n        Button = tk.Button(root, text='OK', font=('Arial', 15), command=\n            fertilizer)\n        Button.place(x=300, y=400)\n        for val in cur:\n            count = 0\n            Y = 50\n            names = ['fertilizer formula: ', 'fertilizer name: ',\n                'fertilizer content: ', 'fertilizer price: ', 'company_id: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid Fertilizer formula', font=(\n            'times new roman', 15))\n        l.place(x=100, y=300)\n        search_fer()\n\n\ndef orders():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Orders Table', font=('Times new roman', 15),\n        bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_ord)\n    Button.place(x=110, y=50)\n    Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n        delete_ord)\n    Button.place(x=210, y=50)\n    Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n        update_ord)\n    Button.place(x=310, y=50)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_ord)\n    Button.place(x=410, y=50)\n    view_ord()\n\n\ndef view_ord():\n    frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n    frame.place(x=10, y=100, width=750, height=400)\n    x_scroll = Scrollbar(frame, orient=HORIZONTAL)\n    y_scroll = Scrollbar(frame, orient=VERTICAL)\n    table = ttk.Treeview(frame, columns=('or_id', 'or_date', 'or_fid',\n        'or_formula', 'or_to'), xscrollcommand=x_scroll.set, yscrollcommand\n        =y_scroll.set)\n    x_scroll.pack(side=BOTTOM, fill=X)\n    y_scroll.pack(side=RIGHT, fill=Y)\n    x_scroll.config(command=table.xview)\n    y_scroll.config(command=table.yview)\n    
table.heading('or_id', text='Order Id')\n table.heading('or_date', text='Order Date')\n table.heading('or_fid', text='Ordered Farmer Id')\n table.heading('or_formula', text='Order (item)formula')\n table.heading('or_to', text='Order to')\n table['show'] = 'headings'\n table.pack()\n cur.execute('SELECT * FROM orders;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_ord():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=10)\n label = Label(root, text='Order date', font=('Times new roman', 20), bg\n ='white')\n label.place(x=50, y=60)\n label = Label(root, text='Order FID', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=110)\n label = Label(root, text='Order formula', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Order to', font=('Times new roman', 20), bg=\n 'white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e2.insert(0, datetime.now())\n e3.place(x=350, y=110)\n e4.place(x=350, y=160)\n e5.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_ord_command)\n Button.place(x=400, y=400)\n\n\ndef insert_ord_command():\n try:\n sql = 'INSERT INTO orders values(%s,%s,%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n cur.executemany(sql, [vals])\n db.commit()\n orders()\n except:\n insert_ord()\n\n\ndef delete_ord():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n 'tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n delete_ord_command)\n Button.place(x=400, y=400)\n\n\ndef delete_ord_command():\n try:\n sql = 'DELETE FROM orders WHERE or_id=%s;'\n cur.execute(sql, [e1.get()])\n db.commit()\n orders()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\ndef update_ord():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n 'tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='OK', font=('Arial', 15), command=update_or)\n Button.place(x=300, y=400)\n\n\ndef update_or():\n try:\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500),\n bg='tomato')\n label.place(x=0, y=0)\n sql = 'SELECT * FROM orders WHERE or_id=%s;'\n vals = [e1.get()]\n cur.execute(sql, vals)\n label = Label(root, text='Order Id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Order Date', 
font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=60)\n        label = Label(root, text='Order f_id', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=110)\n        label = Label(root, text='Order formula', font=('Times new roman',\n            20), bg='white')\n        label.place(x=50, y=160)\n        label = Label(root, text='Order to', font=('Times new roman', 20),\n            bg='white')\n        label.place(x=50, y=210)\n        e1 = Entry(root)\n        e2 = Entry(root)\n        e3 = Entry(root)\n        e4 = Entry(root)\n        e5 = Entry(root)\n        data = cur.fetchall()\n        arr = [e1, e2, e3, e4, e5]\n        count = 0\n        for val in data[0]:\n            arr[count].insert(0, val)\n            count += 1\n        e1.place(x=350, y=10)\n        e2.place(x=350, y=60)\n        e3.place(x=350, y=110)\n        e4.place(x=350, y=160)\n        e5.place(x=350, y=210)\n        label = Button(root, text='Modify', font=('Times new roman', 20),\n            bg='blue', command=update_command_ord)\n        label.place(x=300, y=400)\n    except:\n        l = Label(root, text='Invalid Order_id', font=('times new roman', 15))\n        l.place(x=100, y=300)\n        update_ord()\n\n\ndef update_command_ord():\n    sql = (\n        'UPDATE orders SET or_date=%s,or_fid=%s,or_formula=%s,or_to=%s WHERE or_id=%s;'\n        )\n    vals = e2.get(), e3.get(), e4.get(), e5.get(), e1.get()\n    cur.executemany(sql, [vals])\n    db.commit()\n    orders()\n\n\ndef search_ord():\n    global e1\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Order Id:', font=('Times new roman', 20), bg=\n        'tomato')\n    label.place(x=100, y=200)\n    e1 = Entry(root, width=50)\n    e1.place(x=300, y=200)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=orders)\n    Button.place(x=200, y=400)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_or)\n    Button.place(x=400, y=400)\n\n\ndef search_or():\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    try:\n        sql = 'SELECT * FROM orders WHERE or_id=%s;'\n        val = [e1.get()]\n        cur.execute(sql, val)\n        Button = tk.Button(root, text='OK', font=('Arial', 15), command=orders)\n        Button.place(x=300, y=400)\n        for val in cur:\n            count = 0\n            Y = 50\n            names = ['Order id: ', 'Order date: ', 'Order fid: ',\n                'Order formula: ', 'Order to: ']\n            for i in val:\n                label = Label(root, text=names[count] + str(i), font=(\n                    'Times new roman', 20), bg='tomato')\n                label.place(x=10, y=Y)\n                Y += 50\n                count += 1\n        db.commit()\n    except:\n        l = Label(root, text='Invalid order id', font=('times new roman', 15))\n        l.place(x=100, y=300)\n        search_ord()\n\n\ndef payment():\n    global root\n    label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n        'tomato')\n    label.place(x=0, y=0)\n    label = Label(root, text='Payment Table', font=('Times new roman', 15),\n        bg='white')\n    label.place(x=350, y=10)\n    Button = tk.Button(root, text='Back', font=('Arial', 15), command=\n        entity_page)\n    Button.place(x=10, y=50)\n    Button = tk.Button(root, text='Insert', font=('Arial', 15), command=\n        insert_pay)\n    Button.place(x=110, y=50)\n    Button = tk.Button(root, text='Delete', font=('Arial', 15), command=\n        delete_pay)\n    Button.place(x=210, y=50)\n    Button = tk.Button(root, text='Update', font=('Arial', 15), command=\n        update_pay)\n    Button.place(x=310, y=50)\n    Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n        search_pay)\n    Button.place(x=410, y=50)\n    view_pay()\n\n\ndef view_pay():\n    frame = Frame(root, bd=5, relief=RIDGE, bg='tomato')\n    frame.place(x=10, y=100, width=750, height=400)\n    x_scroll = Scrollbar(frame, 
orient=HORIZONTAL)\n y_scroll = Scrollbar(frame, orient=VERTICAL)\n table = ttk.Treeview(frame, columns=('trans_id', 'p_f_id', 'p_date',\n 'p_amount', 'p_method'), xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n x_scroll.pack(side=BOTTOM, fill=X)\n y_scroll.pack(side=RIGHT, fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('trans_id', text='Transaction Id')\n table.heading('p_f_id', text='Farmer Id')\n table.heading('p_date', text='Payment Date')\n table.heading('p_amount', text='Amount')\n table.heading('p_method', text='Payment Method')\n table['show'] = 'headings'\n table.pack()\n cur.execute('SELECT * FROM payment;')\n data = cur.fetchall()\n db.commit()\n if len(data) != 0:\n for row in data:\n table.insert('', END, values=row)\n\n\n<mask token>\n\n\ndef insert_pay():\n global e1, e2, e3, e4, e5, e6\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id', font=('Times new roman', 20),\n bg='white')\n label.place(x=50, y=10)\n label = Label(root, text='Transaction farmer id', font=(\n 'Times new roman', 20), bg='white')\n label.place(x=50, y=60)\n label = Label(root, text='Transaction date', font=('Times new roman', \n 20), bg='white')\n label.place(x=50, y=110)\n label = Label(root, text='Transaction amount', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=160)\n label = Label(root, text='Transaction method', font=('Times new roman',\n 20), bg='white')\n label.place(x=50, y=210)\n e1 = Entry(root, width=50)\n e2 = Entry(root, width=50)\n e3 = Entry(root, width=50)\n e4 = Entry(root, width=50)\n e5 = Entry(root, width=50)\n e1.place(x=350, y=10)\n e2.place(x=350, y=60)\n e3.place(x=350, y=110)\n e3.insert(0, datetime.now())\n e4.place(x=350, y=160)\n e5 = StringVar(root)\n e5.set('Debit card')\n w = OptionMenu(root, e5, 'Credit Card', 'UPI', 'Cheque', 'Cash')\n w.place(x=350, y=210)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n insert_pay_command)\n Button.place(x=400, y=400)\n\n\ndef insert_pay_command():\n try:\n sql = 'INSERT INTO payment values(%s,%s,%s,%s,%s);'\n vals = e1.get(), e2.get(), e3.get(), e4.get(), e5.get()\n cur.executemany(sql, [vals])\n db.commit()\n payment()\n except:\n insert_pay()\n\n\ndef delete_pay():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n ), bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Commit', font=('Arial', 15), command=\n delete_pay_command)\n Button.place(x=400, y=400)\n\n\ndef delete_pay_command():\n try:\n sql = 'DELETE FROM payment WHERE trans_id=%s;'\n cur.execute(sql, [e1.get()])\n db.commit()\n payment()\n except:\n l = Label(root, text='Invalid Entry', font=('times new roman', 15))\n l.place(x=100, y=300)\n\n\n<mask token>\n\n\ndef search_pay():\n global e1\n label = Label(root, text=' ' * 800, font=('Times new roman', 500), bg=\n 'tomato')\n label.place(x=0, y=0)\n label = Label(root, text='Transaction Id:', font=('Times new roman', 20\n ), bg='tomato')\n label.place(x=100, y=200)\n e1 = Entry(root, 
width=50)\n e1.place(x=300, y=200)\n Button = tk.Button(root, text='Back', font=('Arial', 15), command=payment)\n Button.place(x=200, y=400)\n Button = tk.Button(root, text='Search', font=('Arial', 15), command=\n search_pa)\n Button.place(x=400, y=400)\n\n\n<mask token>\n",
"step-5": "#! /usr/bin/python3\nimport pprint\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom PIL import Image\nfrom tkinter import *\nfrom prettytable import PrettyTable\nimport ttk\nimport os\nimport subprocess\nimport mysql.connector\nfrom datetime import datetime\nimport time\n\n\ndb=mysql.connector.connect(host='localhost',user='root',passwd='PASSWORD',database='DATABASENAME')\ncur=db.cursor()\n\n\nroot=Tk()\nroot.title(\"WELCOME TO AGRI MARKET\")\n\n#stored procedure\n\"\"\"\n DELIMITER $$\n \n CREATE PROCEDURE getMonth(\n IN month VARCHAR(2))\n BEGIN\n SELECT * FROM payment\n WHERE p_date LIKE CONCAT('____-',month,'%');\n END$$\n\n DELIMITER ;\n\n\"\"\"\n\nT1,T2,T3=0,0,0\ndef First_page(root):\n global T1,T2,T3\n frame=Frame(root,height=500,width=800,bg='ivory')\n frame.pack()\n\n label=Label(root,text='WELCOME TO AGRI MARKET',font=('Times new roman',25))\n label.place(x=200,y=50)\n\n button=Button(root,text='LogIn',font=('times new roman',20),command=check_pass,bg='green')\n button.place(x=350,y=350)\n\n L1 = tk.Label(root, text=\"Username\", font=(\"Arial Bold\", 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width = 30, bd = 5)\n T1.place(x=280, y=200)\n\n L2 = tk.Label(root, text=\"Password\", font=(\"Arial Bold\", 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width = 30, show='*', bd = 5)\n T2.place(x=280, y=250)\n\n reg_button=Button(root,text='Register',font=(\"Arial Bold\",15),bg='blue',command=create_pass)\n reg_button.place(x=340,y=400)\n\ndef check_pass():\n global root,T1,T2,T3\n try:\n with open('password.txt','r')as f:\n lines=f.read()\n if T1.get()+'='+T2.get() in lines and T1.get()!='' and T2.get()!='':\n entity_page()\n else:\n label=Label(root,text='Invalid username or password.Try again',font=('times new roman',15))\n label.place(x=200,y=100)\n except:\n label=Label(root,text='Invalid username or password.Try again',font=('times new roman',15))\n label.place(x=200,y=100)\n\ndef create_pass():\n global root,T1,T2,T3\n\n\n #to clean up previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='ivory')\n label.place(x=0,y=0)\n\n\n #this window\n L1 = tk.Label(root, text=\"Username\", font=(\"Arial Bold\", 15), bg='ivory')\n L1.place(x=150, y=200)\n T1 = tk.Entry(root, width = 30, bd = 5)\n T1.place(x=380, y=200)\n\n L2 = tk.Label(root, text=\"Password\", font=(\"Arial Bold\", 15), bg='ivory')\n L2.place(x=150, y=250)\n T2 = tk.Entry(root, width = 30, show='*', bd = 5)\n T2.place(x=380, y=250)\n\n L2 = tk.Label(root, text=\"Confirm Password\", font=(\"Arial Bold\", 15), bg='ivory')\n L2.place(x=150, y=300)\n T3 = tk.Entry(root, width = 30, show='*', bd = 5)\n T3.place(x=380, y=300)\n\n reg_button=Button(root,text='Done',font=(\"Arial Bold\",15),bg='blue',command=add_pass)\n reg_button.place(x=440,y=400)\n\n\ndef add_pass():\n global root,T1,T2,T3\n\n if T2.get()!=T3.get():\n label=Label(root,text='Incorrect Password. 
Enter again',font=('times new roman',20))\n label.place(x=100,y=100)\n else:\n try:\n with open('password.txt','r')as f:\n data=f.read()\n with open('password.txt','w')as f:\n f.write(data+'\\n')\n f.write(T1.get()+'='+T2.get())\n\n entity_page()\n except:\n with open('password.txt','w')as f:\n f.write(T1.get()+'='+T2.get())\n\n entity_page()\n\ndef entity_page():\n global root\n #cleaning previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='ivory')\n label.place(x=0,y=0)\n\n #this window\n label=Label(root,text='WELCOME TO AGRI MARKET ',font=('Times new roman',20),bg='blue')\n label.place(x=200,y=20)\n\n label=Label(root,text='Choose the Entity ',font=('Times new roman',20),bg='white')\n label.place(x=250,y=100)\n\n\n Button = tk.Button(root, text=\"Farmers\", font=(\"Arial\", 15),command=farmer)\n Button.place(x=100, y=150+25)\n\n Button = tk.Button(root, text=\"Company\", font=(\"Arial\", 15),command=company)\n Button.place(x=300, y=150+25)\n\n Button = tk.Button(root, text=\"Fertilizer\", font=(\"Arial\", 15),command=fertilizer)\n Button.place(x=500, y=150+25)\n\n Button = tk.Button(root, text=\"Order\", font=(\"Arial\", 15),command=orders)\n Button.place(x=200, y=300+25)\n\n Button = tk.Button(root, text=\"Payment\", font=(\"Arial\", 15),command=payment)\n Button.place(x=400, y=300+25)\n\n Button = tk.Button(root, text=\"GET BOOKING HISTORY\", font=(\"Arial\", 15),command=history)\n Button.place(x=200, y=400+25)\n\n#history\ndef history():\n global root,cur,db\n #clean previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n\n cur.execute(\"CALL getMonth(%s);\",[datetime.today().strftime(\"%m\")])\n data=cur.fetchall()\n \n label=Label(root,text=\"The Transaction History of this month\",font=(\"Arial\",15))\n label.place(x=200,y=20)\n\n button=Button(root,text='BACK',command=entity_page)\n button.place(x=20,y=20)\n\n frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')\n frame.place(x=10,y=100,width=750,height=400)\n\n x_scroll=Scrollbar(frame,orient=HORIZONTAL)\n y_scroll=Scrollbar(frame,orient=VERTICAL)\n\n table=ttk.Treeview(frame,columns=(\"trans_id\",'p_f_id','p_date','p_amount','p_method'),xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n\n x_scroll.pack(side=BOTTOM,fill=X)\n y_scroll.pack(side=RIGHT,fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('trans_id',text=\"Transaction Id\")\n table.heading('p_f_id',text=\"Farmer Id\")\n\n\n table.heading('p_date',text=\"Payment Date\")\n table.heading('p_amount',text=\"Amount\")\n table.heading('p_method',text=\"Payment Method\")\n #table.heading('f_address',text=\"Farmer Address\")\n table['show']='headings'\n\n #table.column(\"f_id\",width=100)\n\n\n table.pack()\n\n\n\n #cur.execute(\"SELECT * FROM payment;\")\n\n #data =cur.fetchall()\n #db.commit()\n if len(data)!=0:\n for row in data:\n table.insert('',END,values=row)\n\n db.close()\n db=mysql.connector.connect(host='localhost',user='root',passwd='bhushi',database='farmer_app')\n cur=db.cursor()\n \n\n\n#farmer page\ndef farmer():\n global root\n #clean previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Farmer Table',font=('Times new roman',15),bg='white')\n label.place(x=350,y=10)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=entity_page)\n Button.place(x=10, y=50)\n\n Button = tk.Button(root, text=\"Insert\", 
font=(\"Arial\", 15),command=insert_farmer)\n Button.place(x=110, y=50)\n\n Button = tk.Button(root, text=\"Delete\", font=(\"Arial\", 15),command=delete_farmer)\n Button.place(x=210, y=50)\n\n Button = tk.Button(root, text=\"Update\", font=(\"Arial\", 15),command=update_farmer)\n Button.place(x=310, y=50)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_farmer)\n Button.place(x=410, y=50)\n\n view_farmer()\n\n\ndef view_farmer():\n frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')\n frame.place(x=10,y=100,width=750,height=400)\n\n x_scroll=Scrollbar(frame,orient=HORIZONTAL)\n y_scroll=Scrollbar(frame,orient=VERTICAL)\n\n table=ttk.Treeview(frame,columns=(\"f_id\",'f_name','f_phone','f_mail','f_locality','f_address'),xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n\n x_scroll.pack(side=BOTTOM,fill=X)\n y_scroll.pack(side=RIGHT,fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('f_id',text=\"Farmer Id\")\n table.heading('f_name',text=\"Farmer Name\")\n table.heading('f_phone',text=\"Farmer Phone\")\n table.heading('f_mail',text=\"Farmer Mail\")\n table.heading('f_locality',text=\"Farmer Locality\")\n table.heading('f_address',text=\"Farmer Address\")\n table['show']='headings'\n\n table.column(\"f_id\",width=100)\n\n\n table.pack()\n\n\n\n cur.execute(\"SELECT * FROM farmer;\")\n\n data =cur.fetchall()\n db.commit()\n if len(data)!=0:\n for row in data:\n table.insert('',END,values=row)\n\ne1,e2,e3,e4,e5,e6=0,0,0,0,0,0\ndef insert_farmer():\n global e1,e2,e3,e4,e5,e6\n #clean the window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n\n #create the window\n label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Farmer_name',font=('Times new roman',20),bg='white')\n label.place(x=50,y=60)\n\n label=Label(root,text='Farmer_phone',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Farmer_mail',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='Farmer_locality',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n label=Label(root,text='Farmer_address',font=('Times new roman',20),bg='white')\n label.place(x=50,y=270)\n\n e1=Entry(root,width=50)\n e2=Entry(root,width=50)\n e3=Entry(root,width=50)\n e4=Entry(root,width=50)\n e5=Entry(root,width=50)\n e6=Entry(root,width=50)\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n e3.place(x=350,y=110)\n e4.place(x=350,y=160)\n e5.place(x=350,y=210)\n e6.place(x=350,y=270)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=farmer)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=insert_farmer_command)\n Button.place(x=400, y=400)\n\ndef insert_farmer_command():\n global root\n try:\n sql=\"INSERT INTO farmer values(%s,%s,%s,%s,%s,%s);\"\n if len(e1.get())>3:\n invalid('farmer')\n else:\n\n vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get(),e6.get()\n cur.executemany(sql,[vals])\n db.commit()\n farmer()\n except:\n insert_farmer()\ndef invalid(page):\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n if page=='farmer':\n label=Label(root,text='Enter valid farmer_id',font=('Times new roman',30),bg='white')\n label.place(x=170,y=200)\n\n button=Button(root,text='Re-enter',font=('Times new 
roman',20),command=insert_farmer)\n button.place(x=300,y=400)\n elif page=='company':\n label=Label(root,text='Enter valid company_id',font=('Times new roman',30),bg='white')\n label.place(x=170,y=200)\n\n button=Button(root,text='Re-enter',font=('Times new roman',20),command=insert_company)\n button.place(x=300,y=400)\ndef delete_farmer():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=farmer)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=delete_farmer_command)\n Button.place(x=400, y=400)\n\n\ndef delete_farmer_command():\n try:\n sql=\"DELETE FROM farmer WHERE f_id=%s;\"\n cur.execute(sql,[e1.get()])\n db.commit()\n farmer()\n except:\n l=Label(root,text='Invalid Entry',font=('times new roman',15))\n l.place(x=100,y=300)\n\ndef update_farmer():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=update)\n\n Button.place(x=300, y=400)\n\ndef update():\n try:\n global e1,e2,e3,e4,e5,e6\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n sql='SELECT * FROM farmer WHERE f_id=%s;'\n vals=[e1.get()]\n cur.execute(sql,vals)\n\n label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Farmer_name',font=('Times new roman',20),bg='white')\n label.place(x=50,y=60)\n\n label=Label(root,text='Farmer_phone',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Farmer_mail',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='Farmer_locality',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n label=Label(root,text='Farmer_address',font=('Times new roman',20),bg='white')\n label.place(x=50,y=270)\n\n e1=Entry(root)\n e2=Entry(root)\n e3=Entry(root)\n e4=Entry(root)\n e5=Entry(root)\n e6=Entry(root)\n\n data=cur.fetchall()\n arr=[e1,e2,e3,e4,e5,e6]\n count=0\n for val in data[0]:\n arr[count].insert(0,val)\n count+=1\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n e3.place(x=350,y=110)\n e4.place(x=350,y=160)\n e5.place(x=350,y=210)\n e6.place(x=350,y=270)\n\n label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command)\n label.place(x=300,y=400)\n\n\n except:\n l=Label(root,text='Invalid Farmer_id',font=('times new roman',15))\n l.place(x=100,y=300)\n update_farmer()\n\ndef update_command():\n try:\n sql=\"UPDATE farmer SET f_name=%s,f_phone_no=%s,f_mail=%s,f_locality=%s,f_address=%s WHERE f_id=%s;\"\n vals=e2.get(),e3.get(),e4.get(),e5.get(),e6.get(),e1.get()\n cur.executemany(sql,[vals])\n db.commit()\n farmer()\n except:\n update_farmer()\ndef search_farmer():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')\n 
label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=farmer)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search)\n Button.place(x=400, y=400)\ndef search():\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n try:\n sql='SELECT * FROM farmer WHERE f_id=%s;'\n val=[e1.get()]\n cur.execute(sql,val)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=farmer)\n Button.place(x=300, y=400)\n\n for val in cur:\n count=0\n Y=50\n names=['farmer id: ','farmer name: ','farmer phone: ','farmer mail: ','farmer locality: ','farmer address: ']\n for i in val:\n label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')\n label.place(x=10,y=Y)\n Y+=50\n count+=1\n db.commit()\n except:\n l=Label(root,text='Invalid Farmer Id',font=('times new roman',15))\n l.place(x=100,y=300)\n search_farmer()\n\n\n#company page\ndef company():\n global root\n #clean previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Company Table',font=('Times new roman',15),bg='white')\n label.place(x=350,y=10)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=entity_page)\n Button.place(x=10, y=50)\n\n Button = tk.Button(root, text=\"Insert\", font=(\"Arial\", 15),command=insert_company)\n Button.place(x=110, y=50)\n\n Button = tk.Button(root, text=\"Delete\", font=(\"Arial\", 15),command=delete_company)\n Button.place(x=210, y=50)\n\n Button = tk.Button(root, text=\"Update\", font=(\"Arial\", 15),command=update_company)\n Button.place(x=310, y=50)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_company)\n Button.place(x=410, y=50)\n\n view_company()\n\n\ndef view_company():\n frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')\n frame.place(x=10,y=100,width=750,height=400)\n\n x_scroll=Scrollbar(frame,orient=HORIZONTAL)\n y_scroll=Scrollbar(frame,orient=VERTICAL)\n\n table=ttk.Treeview(frame,columns=(\"c_id\",'c_name','c_address'),xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n\n x_scroll.pack(side=BOTTOM,fill=X)\n y_scroll.pack(side=RIGHT,fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('c_id',text=\"Company Id\")\n table.heading('c_name',text=\"Company Name\")\n table.heading('c_address',text=\"Company Address\")\n table['show']='headings'\n\n table.column(\"c_id\",width=100)\n\n\n table.pack()\n\n\n\n cur.execute(\"SELECT * FROM company;\")\n\n data =cur.fetchall()\n db.commit()\n if len(data)!=0:\n for row in data:\n table.insert('',END,values=row)\n\ndef insert_company():\n global e1,e2,e3,e4,e5,e6\n #clean the window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n\n #create the window\n label=Label(root,text='Company_id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Company_name',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Company_address',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n e1=Entry(root,width=50)\n e2=Entry(root,width=50)\n e3=Entry(root,width=50)\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=110)\n e3.place(x=350,y=210)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 
15),command=company)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=insert_company_command)\n Button.place(x=400, y=400)\n\ndef insert_company_command():\n try:\n if len(e1.get())>3:\n invalid(\"company\")\n else:\n sql=\"INSERT INTO company values(%s,%s,%s);\"\n vals=e1.get(),e2.get(),e3.get()\n cur.executemany(sql,[vals])\n db.commit()\n company()\n except:\n insert_company()\ndef delete_company():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Company Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=company)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=delete_company_command)\n Button.place(x=400, y=400)\n\n\ndef delete_company_command():\n try:\n sql=\"DELETE FROM company WHERE c_id=%s;\"\n cur.execute(sql,[int(e1.get())])\n db.commit()\n company()\n except:\n l=Label(root,text='Invalid Entry',font=('times new roman',15))\n l.place(x=100,y=300)\n\ndef update_company():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Company Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=update_c)\n\n Button.place(x=300, y=400)\n\ndef update_c():\n try:\n global e1,e2,e3,e4,e5,e6\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n sql='SELECT * FROM company WHERE c_id=%s;'\n vals=[e1.get()]\n cur.execute(sql,vals)\n\n label=Label(root,text='Company_id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Company_name',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Company_address',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n e1=Entry(root)\n e2=Entry(root)\n e3=Entry(root)\n\n data=cur.fetchall()\n arr=[e1,e2,e3]\n count=0\n for val in data[0]:\n arr[count].insert(0,val)\n count+=1\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=110)\n e3.place(x=350,y=210)\n\n label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_c)\n label.place(x=300,y=400)\n\n\n except:\n l=Label(root,text='Invalid Farmer_id',font=('times new roman',15))\n l.place(x=100,y=300)\n update_company()\n\ndef update_command_c():\n try:\n sql=\"UPDATE company SET c_name=%s,c_address=%s WHERE c_id=%s;\"\n vals=e2.get(),e3.get(),e1.get()\n cur.executemany(sql,[vals])\n db.commit()\n company()\n except:\n update_company()\ndef search_company():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Company Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=company)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_c)\n Button.place(x=400, y=400)\ndef search_c():\n #clean\n label=Label(root,text=' '*800,font=('Times new 
roman',500),bg='tomato')\n label.place(x=0,y=0)\n try:\n sql='SELECT * FROM company WHERE c_id=%s;'\n val=[e1.get()]\n cur.execute(sql,val)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=company)\n Button.place(x=300, y=400)\n\n for val in cur:\n count=0\n Y=50\n names=['company id: ','company name: ','company address: ']\n for i in val:\n label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')\n label.place(x=10,y=Y)\n Y+=50\n count+=1\n db.commit()\n except:\n l=Label(root,text='Invalid Company Id',font=('times new roman',15))\n l.place(x=100,y=300)\n search_company()\n\n\n\n#fertilizer page\ndef fertilizer():\n global root\n #clean previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Fertilizer Table',font=('Times new roman',15),bg='white')\n label.place(x=350,y=10)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=entity_page)\n Button.place(x=10, y=50)\n\n Button = tk.Button(root, text=\"Insert\", font=(\"Arial\", 15),command=insert_fer)\n Button.place(x=110, y=50)\n\n Button = tk.Button(root, text=\"Delete\", font=(\"Arial\", 15),command=delete_fer)\n Button.place(x=210, y=50)\n\n Button = tk.Button(root, text=\"Update\", font=(\"Arial\", 15),command=update_fer)\n Button.place(x=310, y=50)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_fer)\n Button.place(x=410, y=50)\n\n view_fer()\n\n\ndef view_fer():\n frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')\n frame.place(x=10,y=100,width=750,height=400)\n\n x_scroll=Scrollbar(frame,orient=HORIZONTAL)\n y_scroll=Scrollbar(frame,orient=VERTICAL)\n\n table=ttk.Treeview(frame,columns=(\"fe_formula\",'fe_name','fe_content','fe_price','company_id'),xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n\n x_scroll.pack(side=BOTTOM,fill=X)\n y_scroll.pack(side=RIGHT,fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('fe_formula',text=\"Fertilizer Formula\")\n table.heading('fe_name',text=\"Fertilizer name\")\n table.heading('fe_content',text=\"Fertilizer content\")\n table.heading('fe_price',text=\"Fertilizer price\")\n table.heading('company_id',text=\"Company_id\")\n #table.heading('f_address',text=\"Farmer Address\")\n table['show']='headings'\n\n #table.column(\"f_id\",width=100)\n\n\n table.pack()\n\n\n\n cur.execute(\"SELECT * FROM fertilizer;\")\n\n data =cur.fetchall()\n db.commit()\n if len(data)!=0:\n for row in data:\n table.insert('',END,values=row)\n\ne1,e2,e3,e4,e5,e6=0,0,0,0,0,0\ndef insert_fer():\n global e1,e2,e3,e4,e5,e6\n #clean the window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n\n #create the window\n label=Label(root,text='Fertlizer formula',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Fertlizer name',font=('Times new roman',20),bg='white')\n label.place(x=50,y=60)\n\n label=Label(root,text='Fertilizer content',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Fertlizer price',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='Company id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n\n e1=Entry(root,width=50)\n e2=Entry(root,width=50)\n e3=Entry(root,width=50)\n e4=Entry(root,width=50)\n e5=Entry(root,width=50)\n #e6=Entry(root,width=50)\n\n 
e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n e3.place(x=350,y=110)\n e4.place(x=350,y=160)\n e5.place(x=350,y=210)\n #e6.place(x=350,y=270)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=fertilizer)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=insert_fer_command)\n Button.place(x=400, y=400)\n\ndef insert_fer_command():\n try:\n sql=\"INSERT INTO fertilizer values(%s,%s,%s,%s,%s);\"\n vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get()\n cur.executemany(sql,[vals])\n db.commit()\n fertilizer()\n except:\n insert_fer()\ndef delete_fer():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Fertilizer formula:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=fertilizer)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=delete_fer_command)\n Button.place(x=400, y=400)\n\n\ndef delete_fer_command():\n try:\n sql=\"DELETE FROM fertilizer WHERE fe_formula=%s;\"\n cur.execute(sql,[e1.get()])\n db.commit()\n fertilizer()\n except:\n l=Label(root,text='Invalid Entry',font=('times new roman',15))\n l.place(x=100,y=300)\n\ndef update_fer():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Fertlizer formula:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=update_fe)\n\n Button.place(x=300, y=400)\n\ndef update_fe():\n try:\n global e1,e2,e3,e4,e5,e6\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n sql='SELECT * FROM fertilizer WHERE fe_formula=%s;'\n vals=[e1.get()]\n cur.execute(sql,vals)\n\n label=Label(root,text='Fertlizer formula',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Fertlizer name',font=('Times new roman',20),bg='white')\n label.place(x=50,y=60)\n\n label=Label(root,text='Fertlizer content',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Fertlizer price',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='comapny_id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n\n e1=Entry(root)\n e2=Entry(root)\n e3=Entry(root)\n e4=Entry(root)\n e5=Entry(root)\n #e6=Entry(root)\n\n data=cur.fetchall()\n arr=[e1,e2,e3,e4,e5,e6]\n count=0\n for val in data[0]:\n arr[count].insert(0,val)\n count+=1\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n e3.place(x=350,y=110)\n e4.place(x=350,y=160)\n e5.place(x=350,y=210)\n #e6.place(x=350,y=270)\n\n label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_fe)\n label.place(x=300,y=400)\n\n\n except:\n l=Label(root,text='Invalid Farmer_id',font=('times new roman',15))\n l.place(x=100,y=300)\n update_fer()\n\ndef update_command_fe():\n\n sql=\"UPDATE fertilizer SET fe_name=%s,fe_content=%s,fe_price=%s,company_id=%s WHERE fe_formula=%s;\"\n vals=e2.get(),e3.get(),e4.get(),e5.get(),e1.get()\n cur.executemany(sql,[vals])\n db.commit()\n fertilizer()\n\ndef search_fer():\n global e1\n #clean\n 
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Fertlizer formula:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=fertilizer)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_fe)\n Button.place(x=400, y=400)\ndef search_fe():\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n try:\n sql='SELECT * FROM fertilizer WHERE fe_formula=%s;'\n val=[e1.get()]\n cur.execute(sql,val)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=fertilizer)\n Button.place(x=300, y=400)\n\n for val in cur:\n count=0\n Y=50\n names=['fertilizer formula: ','fertilizer name: ','fertilizer content: ','fertilizer price: ','company_id: ']\n for i in val:\n label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')\n label.place(x=10,y=Y)\n Y+=50\n count+=1\n db.commit()\n except:\n l=Label(root,text='Invalid Fertilizer formula',font=('times new roman',15))\n l.place(x=100,y=300)\n search_fer()\n\n\n\n#order page\ndef orders():\n global root\n #clean previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Orders Table',font=('Times new roman',15),bg='white')\n label.place(x=350,y=10)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=entity_page)\n Button.place(x=10, y=50)\n\n Button = tk.Button(root, text=\"Insert\", font=(\"Arial\", 15),command=insert_ord)\n Button.place(x=110, y=50)\n\n Button = tk.Button(root, text=\"Delete\", font=(\"Arial\", 15),command=delete_ord)\n Button.place(x=210, y=50)\n\n Button = tk.Button(root, text=\"Update\", font=(\"Arial\", 15),command=update_ord)\n Button.place(x=310, y=50)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_ord)\n Button.place(x=410, y=50)\n\n view_ord()\n\n\ndef view_ord():\n frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')\n frame.place(x=10,y=100,width=750,height=400)\n\n x_scroll=Scrollbar(frame,orient=HORIZONTAL)\n y_scroll=Scrollbar(frame,orient=VERTICAL)\n\n table=ttk.Treeview(frame,columns=(\"or_id\",'or_date','or_fid','or_formula','or_to'),xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n\n x_scroll.pack(side=BOTTOM,fill=X)\n y_scroll.pack(side=RIGHT,fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('or_id',text=\"Order Id\")\n table.heading('or_date',text=\"Order Date\")\n\n\n table.heading('or_fid',text=\"Ordered Farmer Id\")\n table.heading('or_formula',text=\"Order (item)formula\")\n table.heading('or_to',text=\"Order to\")\n #table.heading('f_address',text=\"Farmer Address\")\n table['show']='headings'\n\n #table.column(\"f_id\",width=100)\n\n\n table.pack()\n\n\n\n cur.execute(\"SELECT * FROM orders;\")\n\n data =cur.fetchall()\n db.commit()\n if len(data)!=0:\n for row in data:\n table.insert('',END,values=row)\n\ne1,e2,e3,e4,e5,e6=0,0,0,0,0,0\ndef insert_ord():\n global e1,e2,e3,e4,e5,e6\n #clean the window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n\n #create the window\n label=Label(root,text='Order Id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Order 
date',font=('Times new roman',20),bg='white')\n label.place(x=50,y=60)\n\n label=Label(root,text='Order FID',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Order formula',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='Order to',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n\n e1=Entry(root,width=50)\n e2=Entry(root,width=50)\n e3=Entry(root,width=50)\n e4=Entry(root,width=50)\n e5=Entry(root,width=50)\n #e6=Entry(root,width=50)\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n e2.insert(0,datetime.now())\n e3.place(x=350,y=110)\n e4.place(x=350,y=160)\n e5.place(x=350,y=210)\n #e6.place(x=350,y=270)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=orders)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=insert_ord_command)\n Button.place(x=400, y=400)\n\ndef insert_ord_command():\n try:\n sql=\"INSERT INTO orders values(%s,%s,%s,%s,%s);\"\n vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get()\n cur.executemany(sql,[vals])\n db.commit()\n orders()\n except:\n insert_ord()\ndef delete_ord():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Order Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=orders)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=delete_ord_command)\n Button.place(x=400, y=400)\n\n\ndef delete_ord_command():\n try:\n sql=\"DELETE FROM orders WHERE or_id=%s;\"\n cur.execute(sql,[e1.get()])\n db.commit()\n orders()\n except:\n l=Label(root,text='Invalid Entry',font=('times new roman',15))\n l.place(x=100,y=300)\n\ndef update_ord():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Order Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=update_or)\n\n Button.place(x=300, y=400)\n\ndef update_or():\n try:\n global e1,e2,e3,e4,e5,e6\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n sql='SELECT * FROM orders WHERE or_id=%s;'\n vals=[e1.get()]\n cur.execute(sql,vals)\n\n label=Label(root,text='Order Id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Order Date',font=('Times new roman',20),bg='white')\n label.place(x=50,y=60)\n\n label=Label(root,text='Order f_id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Order formula',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='Order to',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n\n e1=Entry(root)\n e2=Entry(root)\n e3=Entry(root)\n e4=Entry(root)\n e5=Entry(root)\n #e6=Entry(root)\n\n data=cur.fetchall()\n arr=[e1,e2,e3,e4,e5,e6]\n count=0\n for val in data[0]:\n arr[count].insert(0,val)\n count+=1\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n #e2.insert(0,datetime.now())\n e3.place(x=350,y=110)\n e4.place(x=350,y=160)\n e5.place(x=350,y=210)\n #e6.place(x=350,y=270)\n\n 
label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_ord)\n label.place(x=300,y=400)\n\n\n except:\n l=Label(root,text='Invalid Order_id',font=('times new roman',15))\n l.place(x=100,y=300)\n update_ord()\n\ndef update_command_ord():\n\n sql=\"UPDATE orders SET or_date=%s,or_fid=%s,or_formula=%s,or_to=%s WHERE or_id=%s;\"\n vals=e2.get(),e3.get(),e4.get(),e5.get(),e1.get()\n cur.executemany(sql,[vals])\n db.commit()\n orders()\n\ndef search_ord():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Order Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=orders)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_or)\n Button.place(x=400, y=400)\ndef search_or():\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n try:\n sql='SELECT * FROM orders WHERE or_id=%s;'\n val=[e1.get()]\n cur.execute(sql,val)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=orders)\n Button.place(x=300, y=400)\n\n for val in cur:\n count=0\n Y=50\n names=['order Id: ','Order date: ','Order fid: ','Order formula: ','order to: ']\n for i in val:\n label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')\n label.place(x=10,y=Y)\n Y+=50\n count+=1\n db.commit()\n except:\n l=Label(root,text='Invalid order id',font=('times new roman',15))\n l.place(x=100,y=300)\n search_ord()\n\n\n\n\n#payment page\ndef payment():\n global root\n #clean previous window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Payment Table',font=('Times new roman',15),bg='white')\n label.place(x=350,y=10)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=entity_page)\n Button.place(x=10, y=50)\n\n Button = tk.Button(root, text=\"Insert\", font=(\"Arial\", 15),command=insert_pay)\n Button.place(x=110, y=50)\n\n Button = tk.Button(root, text=\"Delete\", font=(\"Arial\", 15),command=delete_pay)\n Button.place(x=210, y=50)\n\n Button = tk.Button(root, text=\"Update\", font=(\"Arial\", 15),command=update_pay)\n Button.place(x=310, y=50)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_pay)\n Button.place(x=410, y=50)\n\n view_pay()\n\n\ndef view_pay():\n frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')\n frame.place(x=10,y=100,width=750,height=400)\n\n x_scroll=Scrollbar(frame,orient=HORIZONTAL)\n y_scroll=Scrollbar(frame,orient=VERTICAL)\n\n table=ttk.Treeview(frame,columns=(\"trans_id\",'p_f_id','p_date','p_amount','p_method'),xscrollcommand=x_scroll.set,\n yscrollcommand=y_scroll.set)\n\n x_scroll.pack(side=BOTTOM,fill=X)\n y_scroll.pack(side=RIGHT,fill=Y)\n x_scroll.config(command=table.xview)\n y_scroll.config(command=table.yview)\n table.heading('trans_id',text=\"Transaction Id\")\n table.heading('p_f_id',text=\"Farmer Id\")\n\n\n table.heading('p_date',text=\"Payment Date\")\n table.heading('p_amount',text=\"Amount\")\n table.heading('p_method',text=\"Payment Method\")\n #table.heading('f_address',text=\"Farmer Address\")\n table['show']='headings'\n\n #table.column(\"f_id\",width=100)\n\n\n table.pack()\n\n\n\n cur.execute(\"SELECT * FROM payment;\")\n\n data 
=cur.fetchall()\n db.commit()\n if len(data)!=0:\n for row in data:\n table.insert('',END,values=row)\n\ne1,e2,e3,e4,e5,e6=0,0,0,0,0,0\ndef insert_pay():\n global e1,e2,e3,e4,e5,e6\n #clean the window\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n\n #create the window\n label=Label(root,text='Transaction Id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Transaction farmer id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=60)\n\n label=Label(root,text='Transaction date',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Transaction amount',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='Transaction method',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n\n e1=Entry(root,width=50)\n e2=Entry(root,width=50)\n e3=Entry(root,width=50)\n\n e4=Entry(root,width=50)\n e5=Entry(root,width=50)\n #e6=Entry(root,width=50)\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n #e2.insert(0,datetime.now())\n\n e3.place(x=350,y=110)\n e3.insert(0,datetime.now())\n e4.place(x=350,y=160)\n #e5.place(x=350,y=210)\n e5 = StringVar(root)\n e5.set(\"Debit card\") # default value\n\n w= OptionMenu(root, e5, \"Credit Card\", \"UPI\", \"Cheque\",\"Cash\")\n w.place(x=350,y=210)\n\n#mainloop()\n\n #e6.place(x=350,y=270)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=payment)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=insert_pay_command)\n Button.place(x=400, y=400)\n\ndef insert_pay_command():\n try:\n sql=\"INSERT INTO payment values(%s,%s,%s,%s,%s);\"\n vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get()\n cur.executemany(sql,[vals])\n db.commit()\n payment()\n except:\n insert_pay()\ndef delete_pay():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Transaction Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=payment)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Commit\", font=(\"Arial\", 15),command=delete_pay_command)\n Button.place(x=400, y=400)\n\n\ndef delete_pay_command():\n try:\n sql=\"DELETE FROM payment WHERE trans_id=%s;\"\n cur.execute(sql,[e1.get()])\n db.commit()\n payment()\n except:\n l=Label(root,text='Invalid Entry',font=('times new roman',15))\n l.place(x=100,y=300)\n\ndef update_pay():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window\n label=Label(root,text='Transaction Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=update_pa)\n\n Button.place(x=300, y=400)\n\ndef update_pa():\n try:\n global e1,e2,e3,e4,e5,e6\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n sql='SELECT * FROM payment WHERE trans_id=%s;'\n vals=[e1.get()]\n cur.execute(sql,vals)\n\n label=Label(root,text='Transaction Id',font=('Times new roman',20),bg='white')\n label.place(x=50,y=10)\n\n label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')\n 
label.place(x=50,y=60)\n\n label=Label(root,text='Transaction date',font=('Times new roman',20),bg='white')\n label.place(x=50,y=110)\n\n label=Label(root,text='Transaction amount',font=('Times new roman',20),bg='white')\n label.place(x=50,y=160)\n\n label=Label(root,text='Transaction method',font=('Times new roman',20),bg='white')\n label.place(x=50,y=210)\n\n\n e1=Entry(root)\n e2=Entry(root)\n e3=Entry(root)\n e4=Entry(root)\n e5=Entry(root)\n #e6=Entry(root)\n\n data=cur.fetchall()\n arr=[e1,e2,e3,e4,e5,e6]\n count=0\n for val in data[0]:\n if count==5:\n continue\n arr[count].insert(0,val)\n count+=1\n\n e1.place(x=350,y=10)\n e2.place(x=350,y=60)\n\n e3.place(x=350,y=110)\n #e3.insert(0,datetime.now())\n e4.place(x=350,y=160)\n #e5.place(x=350,y=210)\n #e6.place(x=350,y=270)\n e5 = StringVar(root)\n e5.set(\"Debit card\") # default value\n\n w= OptionMenu(root, e5, \"Credit Card\", \"UPI\", \"Cheque\",\"Cash\")\n w.place(x=350,y=210)\n\n label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command_pay)\n label.place(x=300,y=400)\n\n\n except:\n l=Label(root,text='Invalid Order_id',font=('times new roman',15))\n l.place(x=100,y=300)\n update_pay()\n\ndef update_command_pay():\n\n sql=\"UPDATE payment SET p_f_id=%s,p_date=%s,p_amount=%s,p_method=%s WHERE trans_id=%s;\"\n vals=e2.get(),e3.get(),e4.get(),e5.get(),e1.get()\n cur.executemany(sql,[vals])\n db.commit()\n payment()\ndef search_pay():\n global e1\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n\n #window2\n label=Label(root,text='Transaction Id:',font=('Times new roman',20),bg='tomato')\n label.place(x=100,y=200)\n\n e1=Entry(root,width=50)\n e1.place(x=300,y=200)\n\n Button = tk.Button(root, text=\"Back\", font=(\"Arial\", 15),command=payment)\n Button.place(x=200, y=400)\n\n Button = tk.Button(root, text=\"Search\", font=(\"Arial\", 15),command=search_pa)\n Button.place(x=400, y=400)\ndef search_pa():\n #clean\n label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')\n label.place(x=0,y=0)\n try:\n sql='SELECT * FROM payment WHERE trans_id=%s;'\n val=[e1.get()]\n cur.execute(sql,val)\n\n Button = tk.Button(root, text=\"OK\", font=(\"Arial\", 15),command=payment)\n Button.place(x=300, y=400)\n\n for val in cur:\n count=0\n Y=50\n names=['Transaction Id: ','Transaction fid: ','Transaction date: ','Transaction amount: ','Transaction method: ']\n for i in val:\n label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')\n label.place(x=10,y=Y)\n Y+=50\n count+=1\n db.commit()\n except:\n l=Label(root,text='Invalid order id',font=('times new roman',15))\n l.place(x=100,y=300)\n search_pay()\n\n\nFirst_page(root)\nroot.mainloop()\n",
"step-ids": [
46,
47,
52,
53,
66
]
}
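A side note on the stored-procedure path in the step-5 listing above: history() calls the MySQL procedure getMonth that the listing defines in a docstring. A standalone sketch of that call, assuming the same mysql.connector connection (db) and cursor (cur) as in the source:

from datetime import datetime

# Mirrors the call made in history(): fetch this month's payments through
# the getMonth procedure from the listing's docstring.
cur.execute('CALL getMonth(%s);', [datetime.today().strftime('%m')])
rows = cur.fetchall()
# mysql.connector also offers cur.callproc('getMonth', (month,)), reading
# results from cur.stored_results().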
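Also worth flagging in that listing: every single-row INSERT/UPDATE goes through cur.executemany(sql, [vals]) with a one-element list. A minimal sketch of the more direct single-row form, assuming the same cursor and the orders schema from the source:

sql = 'INSERT INTO orders values(%s,%s,%s,%s,%s);'
vals = (e1.get(), e2.get(), e3.get(), e4.get(), e5.get())
cur.execute(sql, vals)  # one parameter tuple: execute, not executemany
db.commit()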
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.views.generic import TemplateView, DetailView, ListView, CreateView, UpdateView, DeleteView, FormView, View
from django.core.urlresolvers import reverse_lazy
from .models import Contact

# Create your views here.


class Home(TemplateView):
    def get(self, request, *args, **kwargs):
        return render_to_response('home.html')


class AddContact(CreateView):
    # NOTE: on Django >= 1.8 a CreateView also needs fields = '__all__'
    # (or a form_class); the imports above suggest an older release.
    model = Contact
    success_url = reverse_lazy('home')


class ListContact(ListView):
    model = Contact
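A hedged sketch of how the three views above might be wired up; the URL patterns and names below are illustrative assumptions (reverse_lazy('home') only implies that some pattern named 'home' exists), written in the Django <= 1.9 style matching the django.core.urlresolvers import:

from django.conf.urls import url
from .views import Home, AddContact, ListContact

urlpatterns = [
    url(r'^$', Home.as_view(), name='home'),  # target of reverse_lazy('home')
    url(r'^contacts/add/$', AddContact.as_view(), name='add_contact'),
    url(r'^contacts/$', ListContact.as_view(), name='list_contact'),
]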
import sys
import time


def initialize(x: int) -> list:
    # Read a whitespace-separated integer matrix from the file named in
    # sys.argv[x], printing a progress dot every 100 lines.
    data = []
    start_time = int(round(time.time()))  # start time in whole seconds
    c = 0
    file1 = sys.argv[x]
    with open(file1) as datafile:
        for line in datafile:
            c += 1
            if c % 100 == 0:
                print('.', sep='', end='', flush=True)
            data.append([int(l) for l in line.split()])
    # Dimensions, if needed: len(data) rows x len(data[0]) cols.
    print('time took:', int(round(time.time())) - start_time, 'seconds')
    return data
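A minimal invocation sketch for initialize; the script and file names are hypothetical, and the argument selects which sys.argv slot holds the data file name:

if __name__ == '__main__':
    # e.g.  python loader.py matrix.txt
    matrix = initialize(1)
    print(len(matrix), 'rows x', len(matrix[0]) if matrix else 0, 'cols')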
<|reserved_special_token_0|>
class server:
def __init__(self):
self.commandSock = socket.socket()
self.commandPort = 8080
self.transferSock = socket.socket()
self.transferPort = 8088
self.chatSock = socket.socket()
self.chatPort = 8085
self.host = ''
self.bindsocket()
def bindsocket(self):
self.commandSock.bind((self.host, self.commandPort))
self.transferSock.bind((self.host, self.transferPort))
self.chatSock.bind((self.host, self.chatPort))
self.commandSock.listen(10)
self.transferSock.listen(10)
self.chatSock.listen(10)
self.filename = ''
print('Waiting for a connection.....')
self.clientTransferSock, self.transferAddr = self.transferSock.accept()
self.clientCommandSock, self.commandAddr = self.commandSock.accept()
self.clientChatSock, self.chatAddr = self.chatSock.accept()
print('Got a transfer connection from %s' % str(self.transferAddr))
print('Got a command connection from %s' % str(self.commandAddr))
print('Got a chat connection from %s' % str(self.chatAddr))
self.sendPartitions()
self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))
print('Partitions Sent!')
def closeServer(self):
self.clientCommandSock.close()
self.clientTransferSock.close()
self.clientChatSock.close()
<|reserved_special_token_0|>
def chat(self):
self.chatfile = open('chatfile.txt', 'w')
self.message = self.clientChatSock.recv(128).decode('utf-8')
self.chatfile.write(self.message + '\n')
self.chatfile.close()
print(self.message)
<|reserved_special_token_0|>
def send(self, directory):
print(directory)
self.filename = directory.split('\\')[len(directory.split('\\')) - 1]
self.filename = self.filename.encode('utf-8')
self.nameSize = len(self.filename)
self.nameSize = str(self.nameSize).encode('utf-8')
self.clientTransferSock.send(self.nameSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'Name Size Received':
print('Waiting for Name Size to deliver...')
time.sleep(1)
else:
print('Name Size Delivered!')
self.clientTransferSock.send(self.filename)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Name Received':
print('Waiting for File Name to deliver...')
time.sleep(1)
else:
print('File Name Delivered!')
self.filename = self.filename.decode('utf-8')
self.fileSize = os.path.getsize(directory)
self.fileSize = str(self.fileSize).encode('utf-8')
self.clientTransferSock.send(self.fileSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Size Received':
print('Waiting for File Size to deliver...')
time.sleep(1)
else:
print('File Size Delivered!')
file_to_send = open(directory, 'rb')
lines = file_to_send.read()
self.clientTransferSock.sendall(lines)
file_to_send.close()
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Received':
print('Waiting for File to deliver...')
time.sleep(1)
else:
print('File Delivered Successfully!')
    def delete(self):
        self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.remove(self.deleteDirectory)
            self.clientCommandSock.send('File Deleted'.encode('utf-8'))
            print('Deleted successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found'.encode('utf-8'))
            print('File not found!')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class server:
def __init__(self):
self.commandSock = socket.socket()
self.commandPort = 8080
self.transferSock = socket.socket()
self.transferPort = 8088
self.chatSock = socket.socket()
self.chatPort = 8085
self.host = ''
self.bindsocket()
def bindsocket(self):
self.commandSock.bind((self.host, self.commandPort))
self.transferSock.bind((self.host, self.transferPort))
self.chatSock.bind((self.host, self.chatPort))
self.commandSock.listen(10)
self.transferSock.listen(10)
self.chatSock.listen(10)
self.filename = ''
print('Waiting for a connection.....')
self.clientTransferSock, self.transferAddr = self.transferSock.accept()
self.clientCommandSock, self.commandAddr = self.commandSock.accept()
self.clientChatSock, self.chatAddr = self.chatSock.accept()
print('Got a transfer connection from %s' % str(self.transferAddr))
print('Got a command connection from %s' % str(self.commandAddr))
print('Got a chat connection from %s' % str(self.chatAddr))
self.sendPartitions()
self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))
print('Partitions Sent!')
def closeServer(self):
self.clientCommandSock.close()
self.clientTransferSock.close()
self.clientChatSock.close()
<|reserved_special_token_0|>
def chat(self):
self.chatfile = open('chatfile.txt', 'w')
self.message = self.clientChatSock.recv(128).decode('utf-8')
self.chatfile.write(self.message + '\n')
self.chatfile.close()
print(self.message)
    def mkdir(self):
        self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.mkdir(self.mkdirPath)
            self.clientCommandSock.send('Directory Made'.encode('utf-8'))
            print('Directory Made Successfully!')
        except OSError:
            # 'Directory Already Exist' is the wire message the client matches on
            self.clientCommandSock.send('Directory Already Exist'.encode('utf-8'))
            print('Directory Already Exists')
def send(self, directory):
print(directory)
        self.filename = directory.split('\\')[-1]
self.filename = self.filename.encode('utf-8')
self.nameSize = len(self.filename)
self.nameSize = str(self.nameSize).encode('utf-8')
self.clientTransferSock.send(self.nameSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'Name Size Received':
print('Waiting for Name Size to deliver...')
time.sleep(1)
else:
print('Name Size Delivered!')
self.clientTransferSock.send(self.filename)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Name Received':
print('Waiting for File Name to deliver...')
time.sleep(1)
else:
print('File Name Delivered!')
self.filename = self.filename.decode('utf-8')
self.fileSize = os.path.getsize(directory)
self.fileSize = str(self.fileSize).encode('utf-8')
self.clientTransferSock.send(self.fileSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Size Received':
print('Waiting for File Size to deliver...')
time.sleep(1)
else:
print('File Size Delivered!')
file_to_send = open(directory, 'rb')
lines = file_to_send.read()
self.clientTransferSock.sendall(lines)
file_to_send.close()
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Received':
print('Waiting for File to deliver...')
time.sleep(1)
else:
print('File Delivered Successfully!')
    def delete(self):
        self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.remove(self.deleteDirectory)
            self.clientCommandSock.send('File Deleted'.encode('utf-8'))
            print('Deleted successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found'.encode('utf-8'))
            print('File not found!')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def listdir(self):
self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')
self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))
self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath
)))).encode('utf-8'))
while self.clientCommandSock.recv(32).decode('utf-8'
) != 'Listdir Size Received':
print('Waiting for Listdir Size to deliver...')
time.sleep(1)
else:
print('Listdir Size Delivered!')
self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).
encode('utf-8'))
while self.clientCommandSock.recv(32).decode('utf-8'
) != 'Listdir Received':
print('Waiting for Listdir to deliver...')
time.sleep(1)
else:
print('Listdir Delivered!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class server:
def __init__(self):
self.commandSock = socket.socket()
self.commandPort = 8080
self.transferSock = socket.socket()
self.transferPort = 8088
self.chatSock = socket.socket()
self.chatPort = 8085
self.host = ''
self.bindsocket()
def bindsocket(self):
self.commandSock.bind((self.host, self.commandPort))
self.transferSock.bind((self.host, self.transferPort))
self.chatSock.bind((self.host, self.chatPort))
self.commandSock.listen(10)
self.transferSock.listen(10)
self.chatSock.listen(10)
self.filename = ''
print('Waiting for a connection.....')
self.clientTransferSock, self.transferAddr = self.transferSock.accept()
self.clientCommandSock, self.commandAddr = self.commandSock.accept()
self.clientChatSock, self.chatAddr = self.chatSock.accept()
print('Got a transfer connection from %s' % str(self.transferAddr))
print('Got a command connection from %s' % str(self.commandAddr))
print('Got a chat connection from %s' % str(self.chatAddr))
self.sendPartitions()
self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))
print('Partitions Sent!')
def closeServer(self):
self.clientCommandSock.close()
self.clientTransferSock.close()
self.clientChatSock.close()
<|reserved_special_token_0|>
def chat(self):
self.chatfile = open('chatfile.txt', 'w')
self.message = self.clientChatSock.recv(128).decode('utf-8')
self.chatfile.write(self.message + '\n')
self.chatfile.close()
print(self.message)
    def mkdir(self):
        self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.mkdir(self.mkdirPath)
            self.clientCommandSock.send('Directory Made'.encode('utf-8'))
            print('Directory Made Successfully!')
        except OSError:
            # 'Directory Already Exist' is the wire message the client matches on
            self.clientCommandSock.send('Directory Already Exist'.encode('utf-8'))
            print('Directory Already Exists')
def send(self, directory):
print(directory)
        self.filename = directory.split('\\')[-1]
self.filename = self.filename.encode('utf-8')
self.nameSize = len(self.filename)
self.nameSize = str(self.nameSize).encode('utf-8')
self.clientTransferSock.send(self.nameSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'Name Size Received':
print('Waiting for Name Size to deliver...')
time.sleep(1)
else:
print('Name Size Delivered!')
self.clientTransferSock.send(self.filename)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Name Received':
print('Waiting for File Name to deliver...')
time.sleep(1)
else:
print('File Name Delivered!')
self.filename = self.filename.decode('utf-8')
self.fileSize = os.path.getsize(directory)
self.fileSize = str(self.fileSize).encode('utf-8')
self.clientTransferSock.send(self.fileSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Size Received':
print('Waiting for File Size to deliver...')
time.sleep(1)
else:
print('File Size Delivered!')
file_to_send = open(directory, 'rb')
lines = file_to_send.read()
self.clientTransferSock.sendall(lines)
file_to_send.close()
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Received':
print('Waiting for File to deliver...')
time.sleep(1)
else:
print('File Delivered Successfully!')
    def delete(self):
        self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.remove(self.deleteDirectory)
            self.clientCommandSock.send('File Deleted'.encode('utf-8'))
            print('Deleted successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found'.encode('utf-8'))
            print('File not found!')
    def copy(self):
        # expects 'source,destination' in a single command message
        self.paths = self.clientCommandSock.recv(128).decode('utf-8').split(',')
        print(self.paths)
        try:
            shutil.copy2(self.paths[0], self.paths[1])
            self.clientCommandSock.send('File Copied'.encode('utf-8'))
            print('Copied successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found or Access Denied'.encode('utf-8'))
            print('File Not Found or Access Denied')
<|reserved_special_token_0|>
    def sendPartitions(self):
        self.dps_default = psutil.disk_partitions()  # collected but unused
        # probe drive letters A: through Z: and report the ones that exist
        self.dps = [chr(x) + ':' for x in range(65, 91) if os.path.exists(chr(x) + ':')]
        self.clientCommandSock.send(str(self.dps).encode('utf-8'))
def listdir(self):
self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')
self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))
self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath
)))).encode('utf-8'))
while self.clientCommandSock.recv(32).decode('utf-8'
) != 'Listdir Size Received':
print('Waiting for Listdir Size to deliver...')
time.sleep(1)
else:
print('Listdir Size Delivered!')
self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).
encode('utf-8'))
while self.clientCommandSock.recv(32).decode('utf-8'
) != 'Listdir Received':
print('Waiting for Listdir to deliver...')
time.sleep(1)
else:
print('Listdir Delivered!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import socket
import threading
import os
import time
import psutil
import shutil
class server:
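    # Three listening sockets split the work: 8080 carries short UTF-8
    # control messages, 8088 carries file payloads, and 8085 carries chat
    # text. Each protocol step is acknowledged by the peer before the next.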
def __init__(self):
self.commandSock = socket.socket()
self.commandPort = 8080
self.transferSock = socket.socket()
self.transferPort = 8088
self.chatSock = socket.socket()
self.chatPort = 8085
self.host = ''
self.bindsocket()
def bindsocket(self):
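        # bind and listen on all three ports, then block until one client
        # has connected on each; the partition list is pushed to the client
        # right after the connections are established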
self.commandSock.bind((self.host, self.commandPort))
self.transferSock.bind((self.host, self.transferPort))
self.chatSock.bind((self.host, self.chatPort))
self.commandSock.listen(10)
self.transferSock.listen(10)
self.chatSock.listen(10)
self.filename = ''
print('Waiting for a connection.....')
self.clientTransferSock, self.transferAddr = self.transferSock.accept()
self.clientCommandSock, self.commandAddr = self.commandSock.accept()
self.clientChatSock, self.chatAddr = self.chatSock.accept()
print('Got a transfer connection from %s' % str(self.transferAddr))
print('Got a command connection from %s' % str(self.commandAddr))
print('Got a chat connection from %s' % str(self.chatAddr))
self.sendPartitions()
self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))
print('Partitions Sent!')
def closeServer(self):
self.clientCommandSock.close()
self.clientTransferSock.close()
self.clientChatSock.close()
    def dicision(self):
        # command dispatch loop: read a request, acknowledge it, then run
        # the matching handler
        while True:
            self.message = self.clientCommandSock.recv(32).decode('utf-8')
            if not self.message:
                break  # empty read: the client closed the command socket
            if self.message == 'Delete Request':
                self.clientCommandSock.send('Delete Request Received'.encode('utf-8'))
                self.delete()
            elif self.message == 'Copy Request':
                self.clientCommandSock.send('Copy Request Received'.encode('utf-8'))
                self.copy()
            elif self.message == 'Send File Request':
                self.clientCommandSock.send('Send File Request Received'.encode('utf-8'))
                self.sendFile()
            elif self.message == 'Listdir Request':
                self.clientCommandSock.send('Listdir Request Received'.encode('utf-8'))
                self.listdir()
            elif self.message == 'Chat Request':
                self.clientCommandSock.send('Chat Request Received'.encode('utf-8'))
                self.chat()
            elif self.message == 'Mkdir Request':
                self.clientCommandSock.send('Mkdir Request Received'.encode('utf-8'))
                self.mkdir()
def chat(self):
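        # note: mode 'w' truncates chatfile.txt on every message, so only
        # the most recent chat line survives on disk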
self.chatfile = open('chatfile.txt', 'w')
self.message = self.clientChatSock.recv(128).decode('utf-8')
self.chatfile.write(self.message + '\n')
self.chatfile.close()
print(self.message)
    def mkdir(self):
        self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.mkdir(self.mkdirPath)
            self.clientCommandSock.send('Directory Made'.encode('utf-8'))
            print('Directory Made Successfully!')
        except OSError:
            # 'Directory Already Exist' is the wire message the client matches on
            self.clientCommandSock.send('Directory Already Exist'.encode('utf-8'))
            print('Directory Already Exists')
def send(self, directory):
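        # transfer protocol: send the name length, the file name, and the
        # byte size in turn, waiting for a client ack after each, then
        # stream the file contents and wait for the final ack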
print(directory)
        self.filename = directory.split('\\')[-1]
self.filename = self.filename.encode('utf-8')
self.nameSize = len(self.filename)
self.nameSize = str(self.nameSize).encode('utf-8')
self.clientTransferSock.send(self.nameSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'Name Size Received':
print('Waiting for Name Size to deliver...')
time.sleep(1)
else:
print('Name Size Delivered!')
self.clientTransferSock.send(self.filename)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Name Received':
print('Waiting for File Name to deliver...')
time.sleep(1)
else:
print('File Name Delivered!')
self.filename = self.filename.decode('utf-8')
self.fileSize = os.path.getsize(directory)
self.fileSize = str(self.fileSize).encode('utf-8')
self.clientTransferSock.send(self.fileSize)
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Size Received':
print('Waiting for File Size to deliver...')
time.sleep(1)
else:
print('File Size Delivered!')
file_to_send = open(directory, 'rb')
lines = file_to_send.read()
self.clientTransferSock.sendall(lines)
file_to_send.close()
while self.clientTransferSock.recv(32).decode('utf-8'
) != 'File Received':
print('Waiting for File to deliver...')
time.sleep(1)
else:
print('File Delivered Successfully!')
    def delete(self):
        self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.remove(self.deleteDirectory)
            self.clientCommandSock.send('File Deleted'.encode('utf-8'))
            print('Deleted successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found'.encode('utf-8'))
            print('File not found!')
    def copy(self):
        # expects 'source,destination' in a single command message
        self.paths = self.clientCommandSock.recv(128).decode('utf-8').split(',')
        print(self.paths)
        try:
            shutil.copy2(self.paths[0], self.paths[1])
            self.clientCommandSock.send('File Copied'.encode('utf-8'))
            print('Copied successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found or Access Denied'.encode('utf-8'))
            print('File Not Found or Access Denied')
def sendFile(self):
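        # receive the requested path, ack it, and run send() on a worker
        # thread so the command loop stays responsive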
self.sendFileDirectory = self.clientCommandSock.recv(128).decode(
'utf-8')
self.clientCommandSock.send('File Directory Received'.encode('utf-8'))
threading.Thread(target=self.send, args=(self.sendFileDirectory,)
).start()
    def sendPartitions(self):
        self.dps_default = psutil.disk_partitions()  # collected but unused
        # probe drive letters A: through Z: and report the ones that exist
        self.dps = [chr(x) + ':' for x in range(65, 91) if os.path.exists(chr(x) + ':')]
        self.clientCommandSock.send(str(self.dps).encode('utf-8'))
def listdir(self):
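        # sends str(os.listdir(path)) as a single blob, preceded by its
        # length so the client knows how many bytes to expect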
self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')
self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))
self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath
)))).encode('utf-8'))
while self.clientCommandSock.recv(32).decode('utf-8'
) != 'Listdir Size Received':
print('Waiting for Listdir Size to deliver...')
time.sleep(1)
else:
print('Listdir Size Delivered!')
self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).
encode('utf-8'))
while self.clientCommandSock.recv(32).decode('utf-8'
) != 'Listdir Received':
print('Waiting for Listdir to deliver...')
time.sleep(1)
else:
print('Listdir Delivered!')
if __name__ == '__main__':
    myServer = server()
    # pass the method itself, not its return value, so the dispatch loop
    # runs on the worker thread instead of blocking this call
    threading.Thread(target=myServer.dicision).start()
<|reserved_special_token_1|>
import socket
import threading
import os
import time
import psutil
import shutil
class server:
def __init__(self):
self.commandSock = socket.socket()
self.commandPort = 8080
self.transferSock = socket.socket()
self.transferPort = 8088
self.chatSock=socket.socket()
self.chatPort=8085
self.host = ''
self.bindsocket()
def bindsocket(self):
self.commandSock.bind((self.host, self.commandPort))
self.transferSock.bind((self.host, self.transferPort))
self.chatSock.bind((self.host,self.chatPort))
self.commandSock.listen(10)
self.transferSock.listen(10)
self.chatSock.listen(10)
self.filename = ""
print ("Waiting for a connection.....")
self.clientTransferSock, self.transferAddr = self.transferSock.accept()
self.clientCommandSock, self.commandAddr = self.commandSock.accept()
self.clientChatSock , self.chatAddr = self.chatSock.accept()
print("Got a transfer connection from %s" % str(self.transferAddr))
print("Got a command connection from %s" % str(self.commandAddr))
print("Got a chat connection from %s" % str(self.chatAddr))
self.sendPartitions()
self.clientCommandSock.send(('Partitions Sent').encode('utf-8'))
print('Partitions Sent!')
def closeServer(self):
self.clientCommandSock.close()
self.clientTransferSock.close()
self.clientChatSock.close()
    def dicision(self):
        # command dispatch loop: read a request, acknowledge it, then run
        # the matching handler
        while True:
            self.message = (self.clientCommandSock.recv(32)).decode('utf-8')
            if not self.message:
                break  # empty read: the client closed the command socket
            if self.message == 'Delete Request':
                self.clientCommandSock.send('Delete Request Received'.encode('utf-8'))
                self.delete()
            elif self.message == 'Copy Request':
                self.clientCommandSock.send('Copy Request Received'.encode('utf-8'))
                self.copy()
            elif self.message == 'Send File Request':
                self.clientCommandSock.send('Send File Request Received'.encode('utf-8'))
                self.sendFile()
            elif self.message == 'Listdir Request':
                self.clientCommandSock.send('Listdir Request Received'.encode('utf-8'))
                self.listdir()
            elif self.message == 'Chat Request':
                self.clientCommandSock.send('Chat Request Received'.encode('utf-8'))
                self.chat()
            elif self.message == 'Mkdir Request':
                self.clientCommandSock.send('Mkdir Request Received'.encode('utf-8'))
                self.mkdir()
def chat(self):
self.chatfile=open('chatfile.txt','w')
self.message = self.clientChatSock.recv(128).decode('utf-8')
self.chatfile.write(self.message+'\n')
self.chatfile.close()
print(self.message)
    def mkdir(self):
        self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.mkdir(self.mkdirPath)
            self.clientCommandSock.send('Directory Made'.encode('utf-8'))
            print ('Directory Made Successfully!')
        except OSError:
            # 'Directory Already Exist' is the wire message the client matches on
            self.clientCommandSock.send('Directory Already Exist'.encode('utf-8'))
            print ('Directory Already Exists')
def send(self, directory):
print(directory)
        self.filename = directory.split('\\')[-1]
self.filename = self.filename.encode('utf-8')
self.nameSize = len(self.filename)
self.nameSize = str(self.nameSize).encode('utf-8')
self.clientTransferSock.send(self.nameSize)
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'Name Size Received':
print('Waiting for Name Size to deliver...')
time.sleep(1)
else:
print('Name Size Delivered!')
self.clientTransferSock.send(self.filename)
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Name Received':
print('Waiting for File Name to deliver...')
time.sleep(1)
else:
print('File Name Delivered!')
self.filename = self.filename.decode('utf-8')
# filename = os.path.join(path,filename)
self.fileSize = os.path.getsize(directory)
self.fileSize = str(self.fileSize).encode('utf-8')
self.clientTransferSock.send(self.fileSize)
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Size Received':
print('Waiting for File Size to deliver...')
time.sleep(1)
else:
print('File Size Delivered!')
file_to_send = open(directory, 'rb')
lines = file_to_send.read()
self.clientTransferSock.sendall(lines)
file_to_send.close()
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Received':
print('Waiting for File to deliver...')
time.sleep(1)
else:
print('File Delivered Successfully!')
    def delete(self):
        self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')
        try:
            os.remove(self.deleteDirectory)
            self.clientCommandSock.send('File Deleted'.encode('utf-8'))
            print ('Deleted successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found'.encode('utf-8'))
            print ('File not found!')
    def copy(self):
        # expects 'source,destination' in a single command message
        self.paths = (self.clientCommandSock.recv(128).decode('utf-8')).split(',')
        print(self.paths)
        try:
            shutil.copy2(self.paths[0], self.paths[1])
            self.clientCommandSock.send('File Copied'.encode('utf-8'))
            print ('Copied successfully!')
        except OSError:
            self.clientCommandSock.send('File Not Found or Access Denied'.encode('utf-8'))
            print ('File Not Found or Access Denied')
def sendFile(self):
self.sendFileDirectory = self.clientCommandSock.recv(128).decode('utf-8')
self.clientCommandSock.send('File Directory Received'.encode('utf-8'))
threading.Thread(target=self.send, args=(self.sendFileDirectory,)).start()
    def sendPartitions(self):
        self.dps_default = psutil.disk_partitions()  # collected but unused
        # probe drive letters A: through Z: and report the ones that exist
        self.dps = [chr(x) + ":" for x in range(65, 91) if os.path.exists(chr(x) + ":")]
        self.clientCommandSock.send((str(self.dps)).encode('utf-8'))
def listdir(self):
self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')
self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))
self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath)))).encode('utf-8'))
while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Size Received':
print('Waiting for Listdir Size to deliver...')
time.sleep(1)
else:
print('Listdir Size Delivered!')
self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).encode('utf-8'))
while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Received':
print('Waiting for Listdir to deliver...')
time.sleep(1)
else:
print('Listdir Delivered!')
if __name__ == '__main__':
    myServer = server()
    # pass the method itself, not its return value, so the dispatch loop
    # runs on the worker thread instead of blocking this call
    threading.Thread(target=myServer.dicision).start()
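# For reference, a minimal client sketch for the command protocol above.
# This is an illustration, not part of the original code: it assumes the
# server runs on localhost with the hard-coded ports, and that each short
# message arrives in a single recv(), which TCP does not guarantee.
import socket

transferSock = socket.socket()
commandSock = socket.socket()
chatSock = socket.socket()
transferSock.connect(('localhost', 8088))
commandSock.connect(('localhost', 8080))
chatSock.connect(('localhost', 8085))

print(commandSock.recv(1024).decode('utf-8'))  # partition list (+ 'Partitions Sent')

commandSock.send('Listdir Request'.encode('utf-8'))
print(commandSock.recv(32).decode('utf-8'))    # 'Listdir Request Received'
commandSock.send('C:\\'.encode('utf-8'))       # hypothetical path
print(commandSock.recv(32).decode('utf-8'))    # 'Listdir Path Received'
size = int(commandSock.recv(32).decode('utf-8'))
commandSock.send('Listdir Size Received'.encode('utf-8'))
listing = commandSock.recv(size).decode('utf-8')
commandSock.send('Listdir Received'.encode('utf-8'))
print(listing)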
|
flexible
|
{
"blob_id": "4736f4e06f166b3c3fd8379a2021eb84a34fcbd3",
"index": 6099,
"step-1": "<mask token>\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n <mask token>\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n <mask token>\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename = self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n <mask token>\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode(\n 'utf-8'))\n print('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename = self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n 
self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n <mask token>\n <mask token>\n <mask token>\n\n def listdir(self):\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath\n )))).encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).\n encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n <mask token>\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode(\n 'utf-8'))\n print('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename = self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n 
self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n\n def copy(self):\n self.pathes = self.clientCommandSock.recv(128).decode('utf-8').split(\n ',')\n print(self.pathes)\n try:\n shutil.copy2(self.pathes[0], self.pathes[1])\n self.clientCommandSock.send('File Copied'.encode('utf-8'))\n print('Copied successfully!')\n except:\n self.clientCommandSock.send('File Not Found or Access Denied'.\n encode('utf-8'))\n print('File Not Found or Access Denied')\n <mask token>\n\n def sendPartitions(self):\n self.dps_defualt = psutil.disk_partitions()\n fmt_str = '{:<8}'\n fmt_str.format('Opts')\n self.dps = [(chr(x) + ':') for x in range(65, 90) if os.path.exists\n (chr(x) + ':')]\n self.clientCommandSock.send(str(self.dps).encode('utf-8'))\n\n def listdir(self):\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath\n )))).encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).\n encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\n<mask token>\n",
"step-4": "import socket\nimport threading\nimport os\nimport time\nimport psutil\nimport shutil\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n\n def dicision(self):\n while True:\n self.message = self.clientCommandSock.recv(32).decode('utf-8')\n if self.message == 'Delete Request':\n self.clientCommandSock.send('Delete Request Received'.\n encode('utf-8'))\n self.delete()\n elif self.message == 'Copy Request':\n self.clientCommandSock.send('Copy Request Received'.encode(\n 'utf-8'))\n self.copy()\n elif self.message == 'Send File Request':\n self.clientCommandSock.send('Send File Request Received'.\n encode('utf-8'))\n self.sendFile()\n elif self.message == 'Listdir Request':\n self.clientCommandSock.send('Listdir Request Received'.\n encode('utf-8'))\n self.listdir()\n elif self.message == 'Chat Request':\n self.clientCommandSock.send('Chat Request Received'.encode(\n 'utf-8'))\n self.chat()\n elif self.message == 'Mkdir Request':\n self.clientCommandSock.send('Mkdir Request Received'.encode\n ('utf-8'))\n self.mkdir()\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode(\n 'utf-8'))\n print('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename 
= self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n\n def copy(self):\n self.pathes = self.clientCommandSock.recv(128).decode('utf-8').split(\n ',')\n print(self.pathes)\n try:\n shutil.copy2(self.pathes[0], self.pathes[1])\n self.clientCommandSock.send('File Copied'.encode('utf-8'))\n print('Copied successfully!')\n except:\n self.clientCommandSock.send('File Not Found or Access Denied'.\n encode('utf-8'))\n print('File Not Found or Access Denied')\n\n def sendFile(self):\n self.sendFileDirectory = self.clientCommandSock.recv(128).decode(\n 'utf-8')\n self.clientCommandSock.send('File Directory Received'.encode('utf-8'))\n threading.Thread(target=self.send, args=(self.sendFileDirectory,)\n ).start()\n\n def sendPartitions(self):\n self.dps_defualt = psutil.disk_partitions()\n fmt_str = '{:<8}'\n fmt_str.format('Opts')\n self.dps = [(chr(x) + ':') for x in range(65, 90) if os.path.exists\n (chr(x) + ':')]\n self.clientCommandSock.send(str(self.dps).encode('utf-8'))\n\n def listdir(self):\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath\n )))).encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).\n encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\nif __name__ == '__main__':\n myServer = server()\n threading.Thread(target=myServer.dicision()).start()\n",
"step-5": "import socket\nimport threading\nimport os\nimport time\nimport psutil\nimport shutil\n\n\n\nclass server:\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock=socket.socket()\n self.chatPort=8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host,self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n\n self.filename = \"\"\n print (\"Waiting for a connection.....\")\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock , self.chatAddr = self.chatSock.accept()\n\n print(\"Got a transfer connection from %s\" % str(self.transferAddr))\n print(\"Got a command connection from %s\" % str(self.commandAddr))\n print(\"Got a chat connection from %s\" % str(self.chatAddr))\n\n self.sendPartitions()\n self.clientCommandSock.send(('Partitions Sent').encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n\n def dicision(self):\n while True:\n self.message = (self.clientCommandSock.recv(32)).decode('utf-8')\n #(self.message)\n if self.message == 'Delete Request':\n self.clientCommandSock.send('Delete Request Received'.encode('utf-8'))\n self.delete()\n elif self.message == 'Copy Request':\n self.clientCommandSock.send('Copy Request Received'.encode('utf-8'))\n self.copy()\n elif self.message == 'Send File Request':\n self.clientCommandSock.send('Send File Request Received'.encode('utf-8'))\n self.sendFile()\n elif self.message == 'Listdir Request':\n self.clientCommandSock.send('Listdir Request Received'.encode('utf-8'))\n self.listdir()\n elif self.message == 'Chat Request':\n self.clientCommandSock.send('Chat Request Received'.encode('utf-8'))\n self.chat()\n elif self.message == 'Mkdir Request':\n self.clientCommandSock.send('Mkdir Request Received'.encode('utf-8'))\n self.mkdir()\n\n def chat(self):\n self.chatfile=open('chatfile.txt','w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message+'\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print ('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode('utf-8'))\n print ('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n 
self.filename = self.filename.decode('utf-8')\n\n # filename = os.path.join(path,filename)\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print ('Delete successfully!')\n except:\n self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print ('File not found!')\n\n def copy(self):\n self.pathes = (self.clientCommandSock.recv(128).decode('utf-8')).split(',')\n print(self.pathes)\n #shutil.copy2(self.pathes[0], self.pathes[1])\n try:\n shutil.copy2(self.pathes[0], self.pathes[1])\n self.clientCommandSock.send('File Copied'.encode('utf-8'))\n print ('Copied successfully!')\n except:\n self.clientCommandSock.send('File Not Found or Access Denied'.encode('utf-8'))\n print ('File Not Found or Access Denied')\n\n def sendFile(self):\n self.sendFileDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('File Directory Received'.encode('utf-8'))\n threading.Thread(target=self.send, args=(self.sendFileDirectory,)).start()\n\n def sendPartitions(self):\n self.dps_defualt = psutil.disk_partitions()\n fmt_str = \"{:<8}\"\n fmt_str.format(\"Opts\")\n self.dps = [chr(x) + \":\" for x in range(65, 90) if os.path.exists(chr(x) + \":\")]\n self.clientCommandSock.send((str(self.dps)).encode('utf-8'))\n\n def listdir(self):\n\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath)))).encode('utf-8'))\n while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).encode('utf-8'))\n while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\n\nif __name__ == '__main__':\n myServer = server()\n threading.Thread(target=myServer.dicision()).start()\n\n",
"step-ids": [
7,
9,
11,
15,
16
]
}
|
[
7,
9,
11,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
main()
<|reserved_special_token_1|>
from .parapred import main
main()
|
flexible
|
{
"blob_id": "96cb2754db2740767dfb145078ed17969e85123d",
"index": 843,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmain()\n",
"step-3": "from .parapred import main\nmain()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import os
import time
import urllib.request
import pandas as pd
from lib.db.dbutils import (
check_blacklisted,
check_ticker_exists,
get_db,
update_blacklisted,
)
def get_data(url, delay=20):
    # Alpha Vantage returns a "Note" field when the rate limit is hit;
    # poll until it clears, sleeping `delay` seconds between attempts
    while True:
        df = json.loads(urllib.request.urlopen(url).read())
        if df.get("Note", 0) == 0:
            break
        time.sleep(delay)
    return df
def grab_a_ticker(symbol="MSFT", apiKey=None):
if apiKey is None:
apiKey = os.environ.get("API_KEY")
# Check if ticker already exists in the database
if not check_ticker_exists(symbol) and not check_blacklisted(symbol):
requestUrl = r"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}"
metaDataUrl = r"https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}"
data = get_data(requestUrl.format(symbol, apiKey))
metaData = get_data(metaDataUrl.format(symbol, apiKey))
df = pd.DataFrame(
pd.DataFrame(data.get("Time Series (Daily)")).transpose()[
"4. close"
]
).reset_index()
df.columns = ["Date", "Price"]
df["Symbol"] = data["Meta Data"]["2. Symbol"]
if len(metaData["bestMatches"]) > 0:
met_df = (
pd.DataFrame(metaData["bestMatches"][0], index=[0])[
["1. symbol", "2. name", "3. type", "4. region"]
]
.reset_index()
.drop(["index"], axis=1)
)
met_df.columns = ["Symbol", "Name", "Type", "Region"]
else:
print(metaData.keys())
met_df = pd.DataFrame()
try:
assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol
df.to_sql(
"time_series", con=get_db(), if_exists="append", index=False
)
met_df.to_sql(
"stock_meta_data",
con=get_db(),
if_exists="append",
index=False,
)
except AssertionError as e:
print(
"'Couldn't get it right with assertion error: {}".format(
str(e)
)
)
update_blacklisted(symbol)
except Exception as e:
print(str(e))
update_blacklisted(symbol)
else:
print("Symbol {} already exists.".format(symbol))
|
normal
|
{
"blob_id": "3c8e6a93c4d5616b9199cf473d298bfa2dc191af",
"index": 9971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-3": "<mask token>\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get('Note', 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-4": "import json\nimport os\nimport time\nimport urllib.request\nimport pandas as pd\nfrom lib.db.dbutils import check_blacklisted, check_ticker_exists, get_db, update_blacklisted\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get('Note', 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-5": "import json\nimport os\nimport time\nimport urllib.request\n\nimport pandas as pd\n\nfrom lib.db.dbutils import (\n check_blacklisted,\n check_ticker_exists,\n get_db,\n update_blacklisted,\n)\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get(\"Note\", 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol=\"MSFT\", apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get(\"API_KEY\")\n # Check if ticker already exists in the database\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = r\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}\"\n metaDataUrl = r\"https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}\"\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(\n pd.DataFrame(data.get(\"Time Series (Daily)\")).transpose()[\n \"4. close\"\n ]\n ).reset_index()\n\n df.columns = [\"Date\", \"Price\"]\n df[\"Symbol\"] = data[\"Meta Data\"][\"2. Symbol\"]\n if len(metaData[\"bestMatches\"]) > 0:\n met_df = (\n pd.DataFrame(metaData[\"bestMatches\"][0], index=[0])[\n [\"1. symbol\", \"2. name\", \"3. type\", \"4. region\"]\n ]\n .reset_index()\n .drop([\"index\"], axis=1)\n )\n met_df.columns = [\"Symbol\", \"Name\", \"Type\", \"Region\"]\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql(\n \"time_series\", con=get_db(), if_exists=\"append\", index=False\n )\n met_df.to_sql(\n \"stock_meta_data\",\n con=get_db(),\n if_exists=\"append\",\n index=False,\n )\n except AssertionError as e:\n print(\n \"'Couldn't get it right with assertion error: {}\".format(\n str(e)\n )\n )\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print(\"Symbol {} already exists.\".format(symbol))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(variable_1, variable_2, variable_3, sep=', ')
print(variable_4, variable_5, sep=', ', end='!\n')
<|reserved_special_token_0|>
print(variable_4 + ', ' + user_name + '! ' + 'Your age: ' + user_age)
<|reserved_special_token_1|>
variable_1 = 100
variable_2 = 500
variable_3 = 222.5
variable_4 = 'Hello'
variable_5 = 'world'
print(variable_1, variable_2, variable_3, sep=', ')
print(variable_4, variable_5, sep=', ', end='!\n')
user_age = input('Enter your age: ')
user_name = input('Enter your name: ')
print(variable_4 + ', ' + user_name + '! ' + 'Your age: ' + user_age)
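# Expected console behavior of the script above (a sketch; the inputs
# '25' and 'Ivan' are made up for illustration):
#   100, 500, 222.5
#   Hello, world!
#   Hello, Ivan! Your age: 25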
|
flexible
|
{
"blob_id": "12ca9a81574d34d1004ac9ebcb2ee4b31d7171e2",
"index": 5623,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(variable_1, variable_2, variable_3, sep=', ')\nprint(variable_4, variable_5, sep=', ', end='!\\n')\n<mask token>\nprint(variable_4 + ', ' + user_name + '! ' + 'Ваш возраст: ' + user_age)\n",
"step-3": "variable_1 = 100\nvariable_2 = 500\nvariable_3 = 222.5\nvariable_4 = 'Hello'\nvariable_5 = 'world'\nprint(variable_1, variable_2, variable_3, sep=', ')\nprint(variable_4, variable_5, sep=', ', end='!\\n')\nuser_age = input('Введите ваш возраст: ')\nuser_name = input('Введите ваше имя: ')\nprint(variable_4 + ', ' + user_name + '! ' + 'Ваш возраст: ' + user_age)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
import requests
import re
def get_content(url):
paste_info = {
'site': 'pomf',
'url': url
}
m = re.match('^.*/([0-9a-zA-Z]+)\.([a-zA-Z0-9]+)$',url)
response = requests.get(url)
if response.status_code != 200:
return
paste_info['ext'] = m.group(2)
paste_info['orig_filename'] = m.group(1)
paste_info['content'] = response.content
return paste_info
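# Minimal usage sketch (the host and filename are hypothetical, not part of
# the original record; any pomf-style direct file URL has this shape):
#   info = get_content('https://a.pomf.cat/abc123.png')
#   if info:
#       print(info['site'], info['orig_filename'], info['ext'], len(info['content']))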
|
normal
|
{
"blob_id": "78a6202f501bc116e21e98a3e83c9e3f8d6402b4",
"index": 3981,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_content(url):\n paste_info = {'site': 'pomf', 'url': url}\n m = re.match('^.*/([0-9a-zA-Z]+)\\\\.([a-zA-Z0-9]+)$', url)\n response = requests.get(url)\n if response.status_code != 200:\n return\n paste_info['ext'] = m.group(2)\n paste_info['orig_filename'] = m.group(1)\n paste_info['content'] = response.content\n return paste_info\n",
"step-3": "import requests\nimport re\n\n\ndef get_content(url):\n paste_info = {'site': 'pomf', 'url': url}\n m = re.match('^.*/([0-9a-zA-Z]+)\\\\.([a-zA-Z0-9]+)$', url)\n response = requests.get(url)\n if response.status_code != 200:\n return\n paste_info['ext'] = m.group(2)\n paste_info['orig_filename'] = m.group(1)\n paste_info['content'] = response.content\n return paste_info\n",
"step-4": "#!/usr/bin/env python\nimport requests\nimport re\ndef get_content(url):\n paste_info = {\n 'site': 'pomf',\n 'url': url\n }\n m = re.match('^.*/([0-9a-zA-Z]+)\\.([a-zA-Z0-9]+)$',url)\n response = requests.get(url)\n if response.status_code != 200:\n return\n paste_info['ext'] = m.group(2)\n paste_info['orig_filename'] = m.group(1)\n paste_info['content'] = response.content\n return paste_info\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
    iii) Keep track of the test sites and how many times they have displayed this
ad
"""
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
error_bugs.append(bug)
continue
if bug.is_ad():
height = '999'
width = '999'
if bug_filetype == 'swf':
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump',
'-X', bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump',
'-Y', bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError:
LOG.exception('swfdump error on file %s' %
bug_filepath)
else:
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output([
'identify', '-format', '"%h"',
bug_filepath]).strip()
width = subprocess.check_output(['identify',
'-format', '"%w"', bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception('identify error on file %s' %
bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.
get_filepath()):
dup = m
break
if dup:
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else:
ads[dup] = {test_site: bug_count}
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site: bug_count}
except KeyError:
target_bin[dimension] = [bug]
ads[bug] = {test_site: bug_count}
return ads, error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
"""
    Takes all the uniq ads seen in this session and writes their metadata
information to a csv file
"""
try:
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
fwtr.write(
'#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(
new_uuidname, name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
<|reserved_special_token_0|>
def export_ads(results, out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1
with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.
format('Ad#', 'Company', 'FileType', 'Ad-Category',
'Website-URL', 'Refresh-Num', 'Training-Topic',
'Context-of-site', 'Total', 'Ad-src'))
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.
get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write(
"""{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},
"""
.format(file_name, bug.get_name(), bug.
get_filetype(), '', test_site, refresh_num,
train_category, 'N/A', bugcount, bug.get_src())
)
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or file_type.startswith('ASCII'
) or file_type.startswith('UTF-8 Unicode English'
) or file_type.startswith('very short'):
bug_type = 'text'
elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
<|reserved_special_token_0|>
def process_results_legacy(refresh_count, output_dir, ext_queue,
result_queue, num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=
bugaffiliation, bug_type=bugtype, matched_pattern=
bugpattern, pathname=bugpathname)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,
dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, ((
output_dir, saved_location, save_to_path, bug,
curl_result_queue),))
try:
sleep(0.5)
        curl_worker_pool.close()
        curl_worker_pool.join()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == 'STOP':
LOG.debug('curl_worker %d received stop' % worker_id)
break
        except Exception:
            LOG.error('Error getting a task from the input queue')
            continue
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,
bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
        except Exception as e:
            LOG.debug('Exception captured %s\n\n' % e)
            continue
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,
filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (
saved_file_name, filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
<|reserved_special_token_0|>
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns data structure containing request/resp info
    Returns None if it did not receive results from the FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {}
input_queue = Queue()
worker_output_queue = Queue()
ack_queue = Queue()
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location = 'Visit%d_%s%d' % (refresh_count, bug.
get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
input_queue.put(('STOP',))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
cbug = worker_output_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
    iii) Keep track of the test sites and how many times they have displayed this
ad
"""
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
error_bugs.append(bug)
continue
if bug.is_ad():
height = '999'
width = '999'
if bug_filetype == 'swf':
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump',
'-X', bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump',
'-Y', bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError:
LOG.exception('swfdump error on file %s' %
bug_filepath)
else:
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output([
'identify', '-format', '"%h"',
bug_filepath]).strip()
width = subprocess.check_output(['identify',
'-format', '"%w"', bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception('identify error on file %s' %
bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.
get_filepath()):
dup = m
break
if dup:
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else:
ads[dup] = {test_site: bug_count}
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site: bug_count}
except KeyError:
target_bin[dimension] = [bug]
ads[bug] = {test_site: bug_count}
return ads, error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
"""
    Takes all the uniq ads seen in this session and writes their metadata
information to a csv file
"""
try:
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
fwtr.write(
'#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(
new_uuidname, name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
<|reserved_special_token_0|>
def generate_stats(results, ads, vmid, session_date, export_folder,
process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
totalads = 0
totaluniqads = len(ads)
totalad_category = {}
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w'
) as bugs_wtr:
bugs_wtr.write(
"""#Ad-UID, Website-URL, Refresh-Num, Training-Topic, Site-Context, BugCount, BugSrc
"""
)
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0
uniq_ads = []
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write(
'{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.
format(uuid, test_site, refresh_num,
train_category, 'N/A', bugcount, bug.get_src())
)
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w'
) as ses_wtr:
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write(
'#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(
train_category, test_site, num_of_visit,
totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results, out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1
with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.
format('Ad#', 'Company', 'FileType', 'Ad-Category',
'Website-URL', 'Refresh-Num', 'Training-Topic',
'Context-of-site', 'Total', 'Ad-src'))
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.
get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write(
"""{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},
"""
.format(file_name, bug.get_name(), bug.
get_filetype(), '', test_site, refresh_num,
train_category, 'N/A', bugcount, bug.get_src())
)
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or file_type.startswith('ASCII'
) or file_type.startswith('UTF-8 Unicode English'
) or file_type.startswith('very short'):
bug_type = 'text'
elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
def curl_worker_legacy(args):
output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.
get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,
filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (
saved_file_name, filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue,
result_queue, num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=
bugaffiliation, bug_type=bugtype, matched_pattern=
bugpattern, pathname=bugpathname)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,
dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, ((
output_dir, saved_location, save_to_path, bug,
curl_result_queue),))
try:
sleep(0.5)
        curl_worker_pool.close()
        curl_worker_pool.join()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == 'STOP':
LOG.debug('curl_worker %d received stop' % worker_id)
break
        except Exception:
            LOG.error('Error getting a task from the input queue')
            continue
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,
bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
        except Exception as e:
            LOG.debug('Exception captured %s\n\n' % e)
            continue
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,
filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (
saved_file_name, filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
        parses the cookie string of a req/resp call to extract the cookie information
returns a list of cookies set on this domain
"""
cookie_list = []
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
            isBug, bug_name, bug_type = ADREGEX.search(d)
            domain_node = WebNode(d, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
for domain, dval in jsonData.items():
domain_node = _check_node(domain)
cookie_list = []
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath': domainPath, 'referrer':
referrer, 'referrerPath': referrerPath, 'cookieList':
parsed_cookie})
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns data structure containing request/resp info
    Returns None if it did not receive results from the FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {}
input_queue = Queue()
worker_output_queue = Queue()
ack_queue = Queue()
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location = 'Visit%d_%s%d' % (refresh_count, bug.
get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
input_queue.put(('STOP',))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
cbug = worker_output_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
    iii) Keep track of the test sites and how many times they have displayed this
ad
"""
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
error_bugs.append(bug)
continue
if bug.is_ad():
height = '999'
width = '999'
if bug_filetype == 'swf':
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump',
'-X', bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump',
'-Y', bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError:
LOG.exception('swfdump error on file %s' %
bug_filepath)
else:
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output([
'identify', '-format', '"%h"',
bug_filepath]).strip()
width = subprocess.check_output(['identify',
'-format', '"%w"', bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception('identify error on file %s' %
bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.
get_filepath()):
dup = m
break
if dup:
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else:
ads[dup] = {test_site: bug_count}
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site: bug_count}
except KeyError:
target_bin[dimension] = [bug]
ads[bug] = {test_site: bug_count}
return ads, error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
"""
    Takes all the uniq ads seen in this session and writes their metadata
information to a csv file
"""
try:
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
fwtr.write(
'#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(
new_uuidname, name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode,
training_sites, test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder,
process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
totalads = 0
totaluniqads = len(ads)
totalad_category = {}
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w'
) as bugs_wtr:
bugs_wtr.write(
"""#Ad-UID, Website-URL, Refresh-Num, Training-Topic, Site-Context, BugCount, BugSrc
"""
)
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0
uniq_ads = []
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write(
'{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.
format(uuid, test_site, refresh_num,
train_category, 'N/A', bugcount, bug.get_src())
)
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w'
) as ses_wtr:
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write(
'#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(
train_category, test_site, num_of_visit,
totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results, out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1
with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.
format('Ad#', 'Company', 'FileType', 'Ad-Category',
'Website-URL', 'Refresh-Num', 'Training-Topic',
'Context-of-site', 'Total', 'Ad-src'))
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.
get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write(
"""{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},
"""
.format(file_name, bug.get_name(), bug.
get_filetype(), '', test_site, refresh_num,
train_category, 'N/A', bugcount, bug.get_src())
)
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or file_type.startswith('ASCII'
) or file_type.startswith('UTF-8 Unicode English'
) or file_type.startswith('very short'):
bug_type = 'text'
elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
def curl_worker_legacy(args):
output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.
get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,
filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (
saved_file_name, filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue,
result_queue, num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=
bugaffiliation, bug_type=bugtype, matched_pattern=
bugpattern, pathname=bugpathname)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,
dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, ((
output_dir, saved_location, save_to_path, bug,
curl_result_queue),))
try:
sleep(0.5)
        curl_worker_pool.close()
        curl_worker_pool.join()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == 'STOP':
LOG.debug('curl_worker %d received stop' % worker_id)
break
        except Exception:
            LOG.error('Error getting a task from the input queue')
            continue
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,
bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
        except Exception as e:
            LOG.debug('Exception captured %s\n\n' % e)
            continue
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,
filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (
saved_file_name, filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
        parses the cookie string of a req/resp call to extract the cookie information
returns a list of cookies set on this domain
"""
cookie_list = []
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
            isBug, bug_name, bug_type = ADREGEX.search(d)
            domain_node = WebNode(d, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
for domain, dval in jsonData.items():
domain_node = _check_node(domain)
cookie_list = []
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath': domainPath, 'referrer':
referrer, 'referrerPath': referrerPath, 'cookieList':
parsed_cookie})
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns data structure containing request/resp info
    Returns None if it did not receive results from the FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {}
input_queue = Queue()
worker_output_queue = Queue()
ack_queue = Queue()
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location = 'Visit%d_%s%d' % (refresh_count, bug.
get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
input_queue.put(('STOP',))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
cbug = worker_output_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
LOG = logging.getLogger('logAdGrabber')
ADREGEX = AdRegEx()
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
    iii) Keep track of the test sites and how many times they have displayed this
ad
"""
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
error_bugs.append(bug)
continue
if bug.is_ad():
height = '999'
width = '999'
if bug_filetype == 'swf':
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump',
'-X', bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump',
'-Y', bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError:
LOG.exception('swfdump error on file %s' %
bug_filepath)
else:
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output([
'identify', '-format', '"%h"',
bug_filepath]).strip()
width = subprocess.check_output(['identify',
'-format', '"%w"', bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception('identify error on file %s' %
bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.
get_filepath()):
dup = m
break
if dup:
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else:
ads[dup] = {test_site: bug_count}
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site: bug_count}
except KeyError:
target_bin[dimension] = [bug]
ads[bug] = {test_site: bug_count}
return ads, error_bugs
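# Shape sketch for identify_uniq_ads (the category, site, and counts are
# illustrative, not from a real session):
#   session_results = {'sports': {'news.example.org': [{<WebBug>: 2}]}}
#   ads, error_bugs = identify_uniq_ads(session_results)
#   # ads maps each unique WebBug to {test_site: times_displayed};
#   # bugs with an empty filepath are collected in error_bugs instead.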
def export_uniq_ads(ads, out_folder, rel_folder):
"""
    Takes all the uniq ads seen in this session and writes their metadata
information to a csv file
"""
try:
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
fwtr.write(
'#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(
new_uuidname, name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
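# A resulting uniq_ads.csv row then looks like (all values illustrative):
#   <uuid1>.gif, SomeAdCompany, gif, 250, 300, <rel_folder>/<uuid1>.gif, http://ads.example.com/a.gif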
def write_run_info(RUNINFO_DIR, session_date):
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode,
training_sites, test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder,
process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
totalads = 0
totaluniqads = len(ads)
totalad_category = {}
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w'
) as bugs_wtr:
bugs_wtr.write(
"""#Ad-UID, Website-URL, Refresh-Num, Training-Topic, Site-Context, BugCount, BugSrc
"""
)
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0
uniq_ads = []
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write(
'{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.
format(uuid, test_site, refresh_num,
train_category, 'N/A', bugcount, bug.get_src())
)
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w'
) as ses_wtr:
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write(
'#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(
train_category, test_site, num_of_visit,
totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results, out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1
with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.
format('Ad#', 'Company', 'FileType', 'Ad-Category',
'Website-URL', 'Refresh-Num', 'Training-Topic',
'Context-of-site', 'Total', 'Ad-src'))
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.
get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write(
"""{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},
"""
.format(file_name, bug.get_name(), bug.
get_filetype(), '', test_site, refresh_num,
train_category, 'N/A', bugcount, bug.get_src())
)
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or file_type.startswith('ASCII'
) or file_type.startswith('UTF-8 Unicode English'
) or file_type.startswith('very short'):
bug_type = 'text'
elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
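# Example mappings, assuming typical `file -b` output strings:
#   get_bug_type('HTML document, ASCII text')           -> ('text', False)
#   get_bug_type('GIF image data, version 89a, 1 x 1')  -> ('gif', False)   # 1x1 tracking pixel
#   get_bug_type('Macromedia Flash data, version 9')    -> ('swf', True)
#   get_bug_type('JPEG image data, JFIF standard 1.01') -> ('jpg', True)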
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
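# Sketch of the JSON entry this expects (field names are taken from the code
# above; the values are made up):
#   entry = {'bug': {'name': 'Some Ads', 'pattern': 'adserver',
#                    'affiliation': 'SomeCo', 'type': 'ad'},
#            'ent': {'policyContentLocation': 'http://ads.example.com/a.gif',
#                    'pathname': '/a.gif'}}
#   parse_buginfo(entry)  # -> WebBug named 'SomeAds'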
def curl_worker_legacy(args):
output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.
get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,
filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (
saved_file_name, filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue,
result_queue, num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ''
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=
bugaffiliation, bug_type=bugtype, matched_pattern=
bugpattern, pathname=bugpathname)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,
dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, ((
output_dir, saved_location, save_to_path, bug,
curl_result_queue),))
try:
sleep(0.5)
        curl_worker_pool.close()
        curl_worker_pool.join()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == 'STOP':
LOG.debug('curl_worker %d received stop' % worker_id)
break
        except Exception:
            LOG.error('Error getting task from input queue')
            continue
saved_file_name = task[0]
path = task[1]
bug = task[2]
        subpr_out = ''  # default so a failed download still classifies as text
        try:
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,
bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
except Exception as e:
LOG.debug('Exception captured %s\n\n' % e)
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,
filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (
saved_file_name, filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
parses a dictionary of req/resp calls to extract the cookie information
returns a list of cookies set on this domain
"""
cookie_list = []
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
            # classify the requested key d, not the enclosing loop's domain
            isBug, bug_name, bug_type = ADREGEX.search(d)
            domain_node = WebNode(d, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
for domain, dval in jsonData.items():
domain_node = _check_node(domain)
cookie_list = []
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
            domain_node.add_reqresp({'domainPath': domainPath,
                'referrer': referrer, 'referrerPath': referrerPath,
                'cookieList': parsed_cookie})
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
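# The mapping returned by build_nodes is keyed by domain string, e.g.
# {'tracker.example.com': <WebNode>, ...} (illustrative key). Each WebNode
# ends up holding its req/resp records, its parsed cookies, and parent/child
# links into the referrer chain, per the calls above.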
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
    Returns a data structure containing request/resp info, or None if no
    results were received from the FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {}
input_queue = Queue()
worker_output_queue = Queue()
ack_queue = Queue()
bug_dict = {}
try:
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
dl_counter = 0
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
                saved_location = 'Visit%d_%s%d' % (refresh_count,
                    bug.get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join(output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
input_queue.put(('STOP',))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
cbug = worker_output_queue.get()
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'
) as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
from time import sleep
from uuid import uuid1
from pprint import pprint
from shutil import copy2
from multiprocessing import Process, Queue, Pool, Manager
from ad_grabber_classes import *
from adregex import *
from pygraph.classes.digraph import digraph
import os
import json
import jsonpickle
import subprocess
import cPickle
import logging
LOG = logging.getLogger("logAdGrabber")
ADREGEX = AdRegEx()
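# Note: judging from its use in build_nodes._check_node below, ADREGEX.search()
# is expected to return an (isBug, bug_name, bug_type) triple for any domain,
# with isBug falsy when no known ad/tracker pattern matches.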
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
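# check_duplicate leans on diff's exit status: check_output raises
# CalledProcessError on a non-zero exit (files differ), so only byte-identical
# files count as duplicates. E.g. (hypothetical paths):
#   check_duplicate('/tmp/ad_1.gif', '/tmp/ad_1_copy.gif')  # True iff identical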
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
    iii) Keep track of the test sites and how many times they have displayed
         this ad
"""
# bin by dimensions
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
#LOG.debug('did not manage to curl the scripts for bug:%s' % bug)
error_bugs.append(bug)
continue
                    if bug.is_ad():  # non-ads are ignored here
height = '999'
width = '999'
if bug_filetype == 'swf':
# choose from the swf media bin
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump', '-X',
bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump', '-Y',
bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError :
LOG.exception("swfdump error on file %s" % bug_filepath)
else:
# choose from the img media bin
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output(['identify', '-format', '"%h"',\
bug_filepath]).strip()
width = subprocess.check_output(['identify', '-format','"%w"',\
bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception("identify error on file %s" % bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
# check all the images in the bin with the dimensions
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.get_filepath()):
dup = m
break
if dup:
# check if the duplicate ad came from a different test site
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else :
ads[dup] = {test_site : bug_count}
# delete old bug reference, add new one and point to duplicated
# bug
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site : bug_count}
# tally up the results
except KeyError: # The bin hasn't been created
target_bin[dimension] = [bug]
ads[bug] = {test_site : bug_count}
                    # else: non-ad bugs are not binned (the notads dict is unused)
    return ads, error_bugs
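# Shape of the return values (illustrative): ads maps each unique ad to its
# per-site tally and error_bugs collects bugs whose payload never downloaded:
#   ads        == { <WebBug>: {'test-site.com': 3, ...}, ... }
#   error_bugs == [ <WebBug with empty filepath>, ... ]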
def export_uniq_ads(ads, out_folder, rel_folder):
"""
    Takes all the uniq ads seen in this session and writes their metadata
information to a csv file
"""
    try:
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
# Relative location = Location of the ad within this current session
# Global location, added when an ad is matched with existing ads in DB
fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
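# A resulting uniq_ads.csv row (UUID, dimensions and URL made up for
# illustration) would read, matching the header written above:
#   1a2b3c-....jpg, SomeAdCompany, jpg, 250, 300, rel_folder/1a2b3c-....jpg, http://ads.example.com/x.jpg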
def write_run_info(RUNINFO_DIR, session_date):
# write to a file in runinfo_dir to tell automation script this run is done
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
        bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic, '
                       'Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
test_site, num_of_visit, totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1 # assign unique number to ads for export to mturk
#short_listed_companies = ['google adsense', 'doubleclick']
with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
# write the titles
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
# make sure we only add one ad
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
#TODO check bug_type in ffext
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
                            fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'
                                .format(file_name, bug.get_name(), bug.get_filetype(),
                                    '', test_site, refresh_num, train_category,
                                    'N/A', bugcount, bug.get_src()))
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or \
file_type.startswith('ASCII') or \
file_type.startswith('UTF-8 Unicode English') or \
file_type.startswith('very short') :
bug_type = 'text'
elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
    try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
def curl_worker_legacy(args):
output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
# subprocess.call(['curl', '-o', path , bug.get_src() ])
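    # wget flags: -t 1 = single try, -q = quiet, -T 3 = 3-second timeout,
    # -O = save the response body to `path`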
    subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.get_src()])
# Use the unix tool 'file' to check filetype
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
    bug_dict = {}  # tracks how many duplicates of each bug exist
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
# uses a pool of 'curl' workers
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
            try:
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
            saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,
                dl_counter)
            dl_counter += 1
            save_to_path = os.path.join(output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, \
((output_dir, saved_location, save_to_path, bug, curl_result_queue),))
    try:
        sleep(0.5)
        # a Pool must be close()d before join(); the original join-first
        # order raises and silently skipped the orderly shutdown
        curl_worker_pool.close()
        curl_worker_pool.join()
    except Exception:
        LOG.debug('Closing pool')
        curl_worker_pool.terminate()
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
        # re-key the dict: the queue returns updated WebBug objects (with
        # filetype/filepath set), so swap them in while keeping the count
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
    with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == "STOP":
LOG.debug('curl_worker %d received stop' % worker_id)
break
        except Exception:
            LOG.error('Error getting task from input queue')
            continue
#LOG.debug(task)
saved_file_name = task[0]
path = task[1]
bug = task[2]
        subpr_out = ''  # default so a failed download still classifies as text
        try:
# subprocess.call(['curl', '-o', path , bug.get_src()])
            subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
        except Exception as e:
LOG.debug('Exception captured %s\n\n' % e)
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
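# Worker protocol implied above: the parent feeds (saved_file_name, path, bug)
# tuples through input_queue and each annotated WebBug goes back out on
# worker_output_queue; when a one-element ('STOP',) tuple arrives, the worker
# breaks out of its loop, posts its id to ack_queue once, and exits.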
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
parses a dictionary of req/resp calls to extract the cookie information
returns a list of cookies set on this domain
"""
cookie_list = []
# parses cookie str if a cookie has been set
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
# usually this is just a flag e.g HTTPOnly, HTTPSOnly
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
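    # e.g. _process_cookiestr('id=abc123; HttpOnly\ntrk=1; Secure') yields
    # [{'id': 'abc123'}, {'trk': '1'}] -- flag-only tokens such as HttpOnly
    # carry no '=' and are skipped (cookie values made up for illustration)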
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
            # classify the requested key d, not the enclosing loop's domain
            isBug, bug_name, bug_type = ADREGEX.search(d)
            domain_node = WebNode(d, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
#jsonData contains all the domains and all the req/resp pairs made to them
#iterating over the domains first
for domain, dval in jsonData.items():
# but first check if a node for this domain has been created or not
domain_node = _check_node(domain)
cookie_list = []
# iterating thru all the req/resp pairs on a domain
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath' : domainPath,
'referrer' : referrer,
'referrerPath' : referrerPath,
'cookieList' : parsed_cookie
})
# making sure that we also create the node for the referrer
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
    Returns a data structure containing request/resp info, or None if no
    results were received from the FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
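        # drain any stale entries so leftovers never bleed into the next URL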
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
    aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {} # keep track of worker processes
input_queue = Queue() # asynchronously feed workers task to do
worker_output_queue = Queue() # output queue from workers
ack_queue = Queue()
    bug_dict = {}  # tracks how many duplicates of each bug exist
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
        # directories already exist from a previous run; safe to continue
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,\
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
    # legacy path: used a Pool of 'curl' workers instead of Processes
# curl_worker_pool = Pool(processes=8)
# manager = Manager()
# curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
            saved_location = 'Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)
            dl_counter += 1
            save_to_path = os.path.join(output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
# send stop signal
input_queue.put(("STOP",))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
# receive results from the worker
cbug = worker_output_queue.get()
        # re-key the dict: the queue returns updated WebBug objects (with
        # filetype/filepath set), so swap them in while keeping the count
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
    with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
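# End-to-end sketch (editor's addition; names and wiring are assumptions, not
# part of the original module). A browser-driving parent pushes the addon's
# JSON onto ext_queue while process_results downloads and tallies the bugs:
#
#   from multiprocessing import Process, Queue
#   ext_queue, result_queue = Queue(), Queue()
#   p = Process(target=process_results,
#               args=(0, '/tmp/session0', ext_queue, result_queue))
#   p.start()
#   # ... the firefox driver calls ext_queue.put(json.dumps(found_bugs)) ...
#   bug_dict = result_queue.get()   # {WebBug: count} for this visit
#   p.join()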
bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node\n for domain, dval in jsonData.items():\n domain_node = _check_node(domain)\n cookie_list = []\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie']\n parsed_cookie = None\n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath': domainPath, 'referrer':\n referrer, 'referrerPath': referrerPath, 'cookieList':\n parsed_cookie})\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. \n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {}\n input_queue = Queue()\n worker_output_queue = Queue()\n ack_queue = Queue()\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n try:\n saved_location = 'Visit%d_%s%d' % (refresh_count, bug.\n get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n for i in range(num_of_workers):\n input_queue.put(('STOP',))\n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n while not worker_output_queue.empty():\n cbug = worker_output_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n",
"step-4": "<mask token>\nLOG = logging.getLogger('logAdGrabber')\nADREGEX = AdRegEx()\n\n\ndef check_duplicate(fp1, fp2):\n \"\"\"takes two files, does a diff on them, returns True if same\"\"\"\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False\n\n\ndef identify_uniq_ads(session_results):\n \"\"\"\n i) Identify duplicate ads\n ii) bin the ads by their dimensions\n iii) Keep track of the test sites and have many times they have displayed this\n ad\n \"\"\"\n ads = {}\n notads = {}\n swf_bin = {}\n img_bin = {}\n error_bugs = []\n for train_category, cat_dict in session_results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for index_count in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[index_count]\n for bug, bug_count in bug_dict.items():\n bug_filetype = bug.get_filetype()\n bug_filepath = bug.get_filepath()\n if bug_filepath == '':\n error_bugs.append(bug)\n continue\n if bug.is_ad():\n height = '999'\n width = '999'\n if bug_filetype == 'swf':\n target_bin = swf_bin\n try:\n width = subprocess.check_output(['swfdump',\n '-X', bug_filepath]).split(' ')[-1].strip()\n height = subprocess.check_output(['swfdump',\n '-Y', bug_filepath]).split(' ')[-1].strip()\n except subprocess.CalledProcessError:\n LOG.exception('swfdump error on file %s' %\n bug_filepath)\n else:\n target_bin = img_bin\n LOG.debug(bug_filepath)\n try:\n height = subprocess.check_output([\n 'identify', '-format', '\"%h\"',\n bug_filepath]).strip()\n width = subprocess.check_output(['identify',\n '-format', '\"%w\"', bug_filepath]).strip()\n except subprocess.CalledProcessError:\n LOG.exception('identify error on file %s' %\n bug_filepath)\n try:\n bug.set_dimension(height, width)\n dimension = '%s-%s' % (height, width)\n m_list = target_bin[dimension]\n dup = None\n for m in m_list:\n if check_duplicate(bug_filepath, m.\n get_filepath()):\n dup = m\n break\n if dup:\n if test_site in ads[dup]:\n ads[dup][test_site] += bug_count\n else:\n ads[dup] = {test_site: bug_count}\n del bug_dict[bug]\n bug_dict[dup] = bug_count\n else:\n target_bin[dimension].append(bug)\n ads[bug] = {test_site: bug_count}\n except KeyError:\n target_bin[dimension] = [bug]\n ads[bug] = {test_site: bug_count}\n return ads, error_bugs\n\n\ndef export_uniq_ads(ads, out_folder, rel_folder):\n \"\"\"\n Takes all the uniq ads seen in this session and writes its metadata\n information to a csv file\n \"\"\"\n try:\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n fwtr.write(\n '#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(\n new_uuidname, name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads\n\n\ndef write_run_info(RUNINFO_DIR, session_date):\n fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)\n with open(fp, 'w') as fwtr:\n fwtr.write('OK')\n\n\ndef write_session_info(vmid, machineid, profile, session_date, 
train_mode,\n training_sites, test_sites, num_of_refresh, export_folder):\n train_category = training_sites.keys()[0]\n train_sites_to_visit = training_sites[train_category]\n with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:\n fwtr.write('session_str : %s\\n' % session_date)\n fwtr.write('machine_info : %s\\n' % machineid)\n fwtr.write('vmid : %s\\n' % vmid)\n fwtr.write('profile : %s\\n' % profile)\n fwtr.write('train_mode : %s\\n' % train_mode)\n fwtr.write('num_of_refresh : %d\\n' % num_of_refresh)\n fwtr.write('training_topic : %s\\n' % train_category)\n fwtr.write('training_sites : ')\n for site in train_sites_to_visit:\n fwtr.write('%s, ' % site)\n fwtr.write('\\nnum_of_train_sites : %d\\n' % len(train_sites_to_visit))\n fwtr.write('test_sites : ')\n for site in test_sites:\n fwtr.write('%s, ' % site[1])\n fwtr.write('\\nnum_of_test_sites : %d\\n' % len(test_sites))\n\n\ndef generate_stats(results, ads, vmid, session_date, export_folder,\n process_ex_time):\n \"\"\"\n Generates stats on\n - uniq ads seen on the test sites\n - total number of ads seen on the test sites\n - total number of ads seen on all test sites\n - total number of uniq ads seen on all test sites\n \"\"\"\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n totalads = 0\n totaluniqads = len(ads)\n totalad_category = {}\n uniqad_category = {}\n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w'\n ) as bugs_wtr:\n bugs_wtr.write(\n \"\"\"#Ad-UID, Website-URL, Refresh-Num, Training-Topic, Site-Context, BugCount, BugSrc\n\"\"\"\n )\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0\n uniq_ads = []\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write(\n '{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.\n format(uuid, test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w'\n ) as ses_wtr:\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write(\n '#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(\n train_category, test_site, num_of_visit,\n totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))\n\n\ndef export_ads(results, out_folder):\n \"\"\"\n This function creates a csv file which contains all the unique ads seen in\n each test site (including all the refreshes)\n\n TODO update the doc\n results is a dictionary of the following\n results = { Category : Value, ... }\n value = { test_site_url : [ result1, result2, ... resultN], ... }\n resultN : { WebBug : count, ... 
}\n \"\"\"\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n export_ad_counter = 1\n with open(os.path.join(out_folder, 'ad_labelling.csv'), 'w') as fwtr:\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.\n format('Ad#', 'Company', 'FileType', 'Ad-Category',\n 'Website-URL', 'Refresh-Num', 'Training-Topic',\n 'Context-of-site', 'Total', 'Ad-src'))\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.\n get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write(\n \"\"\"{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9}, \n\"\"\"\n .format(file_name, bug.get_name(), bug.\n get_filetype(), '', test_site, refresh_num,\n train_category, 'N/A', bugcount, bug.get_src())\n )\n export_ad_counter += 1\n\n\ndef get_bug_type(file_type):\n is_ad = False\n bug_type = 'text'\n if file_type.startswith('HTML') or file_type.startswith('ASCII'\n ) or file_type.startswith('UTF-8 Unicode English'\n ) or file_type.startswith('very short'):\n bug_type = 'text'\n elif file_type.endswith('1 x 1') and file_type.startswith('GIF'):\n bug_type = 'gif'\n elif file_type.startswith('PNG'):\n bug_type = 'png'\n is_ad = True\n elif file_type.startswith('GIF'):\n bug_type = 'gif'\n is_ad = True\n elif file_type.startswith('Macromedia Flash'):\n bug_type = 'swf'\n is_ad = True\n elif file_type.startswith('JPEG'):\n bug_type = 'jpg'\n is_ad = True\n return bug_type, is_ad\n\n\ndef parse_buginfo(entry):\n \"\"\"\n Takes the json decoded bug information and inserts it into a WebBug instance\n \"\"\"\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n\n\ndef curl_worker_legacy(args):\n output_dir = args[0]\n saved_file_name = args[1]\n path = args[2]\n bug = args[3]\n curl_result_queue = args[4]\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path, bug.\n get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n curl_result_queue.put(bug)\n\n\ndef process_results_legacy(refresh_count, output_dir, ext_queue,\n result_queue, num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. 
The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ', '').replace('/', '_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try:\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = ''\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=\n bugaffiliation, bug_type=bugtype, matched_pattern=\n bugpattern, pathname=bugpathname)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n saved_location = 'Visit%d_%s%d' % (refresh_count, bugname,\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, ((\n output_dir, saved_location, save_to_path, bug,\n curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n\n\ndef curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\n ack_queue):\n while True:\n try:\n task = input_queue.get()\n if len(task) == 1 and task[0] == 'STOP':\n LOG.debug('curl_worker %d received stop' % worker_id)\n break\n except Exception:\n LOG.error('Error:')\n saved_file_name = task[0]\n path = task[1]\n bug = task[2]\n try:\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path,\n bug.get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n except Exception as e:\n LOG.debug('Exception captured %s\\n\\n' % e)\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name,\n filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (\n saved_file_name, filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n worker_output_queue.put(bug)\n ack_queue.put(worker_id)\n return\n\n\ndef build_nodes(jsonData):\n \"\"\"\n This function takes a JSON encoded output of the firefox addon and builds a\n call graph for the javascript/HTML redirections\n\n @rtype nodes: dict\n @return: A graph of redirection chains\n \"\"\"\n nodes = {}\n\n def _process_cookiestr(cookieStr):\n \"\"\"\n parses a dictionary of req/resp calls to extract the cookie information\n returns a list of cookies set on this domain\n \"\"\"\n cookie_list = []\n for cookie in cookieStr.split('\\n'):\n c = {}\n for cook in cookie.split(';'):\n token = cook.split('=', 1)\n if len(token) < 2:\n continue\n c[token[0]] = token[1]\n cookie_list.append(c)\n return cookie_list\n\n def _check_node(d):\n try:\n domain_node = nodes[d]\n except KeyError:\n isBug, bug_name, 
bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node\n for domain, dval in jsonData.items():\n domain_node = _check_node(domain)\n cookie_list = []\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie']\n parsed_cookie = None\n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath': domainPath, 'referrer':\n referrer, 'referrerPath': referrerPath, 'cookieList':\n parsed_cookie})\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. \n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {}\n input_queue = Queue()\n worker_output_queue = Queue()\n ack_queue = Queue()\n bug_dict = {}\n try:\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n dl_counter = 0\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1\n try:\n saved_location = 'Visit%d_%s%d' % (refresh_count, bug.\n get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join(output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n for i in range(num_of_workers):\n input_queue.put(('STOP',))\n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n while not worker_output_queue.empty():\n cbug = worker_output_queue.get()\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open(os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w'\n ) as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n",
"step-5": "from time import sleep\nfrom uuid import uuid1\nfrom pprint import pprint\nfrom shutil import copy2\nfrom multiprocessing import Process, Queue, Pool, Manager\nfrom ad_grabber_classes import *\nfrom adregex import *\nfrom pygraph.classes.digraph import digraph\n\nimport os\nimport json\nimport jsonpickle\nimport subprocess\nimport cPickle\nimport logging\nLOG = logging.getLogger(\"logAdGrabber\")\nADREGEX = AdRegEx()\n\ndef check_duplicate(fp1, fp2):\n \"\"\"takes two files, does a diff on them, returns True if same\"\"\"\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False\n\ndef identify_uniq_ads(session_results):\n \"\"\"\n i) Identify duplicate ads\n ii) bin the ads by their dimensions\n iii) Keep track of the test sites and have many times they have displayed this\n ad\n \"\"\"\n # bin by dimensions\n ads = {}\n notads = {}\n swf_bin = {}\n img_bin = {}\n error_bugs = []\n for train_category, cat_dict in session_results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for index_count in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[index_count] \n for bug, bug_count in bug_dict.items():\n bug_filetype = bug.get_filetype()\n bug_filepath = bug.get_filepath()\n if bug_filepath == '':\n #LOG.debug('did not manage to curl the scripts for bug:%s' % bug)\n error_bugs.append(bug)\n continue\n\n if bug.is_ad(): # give zerofucks to non-ads\n height = '999'\n width = '999'\n if bug_filetype == 'swf':\n # choose from the swf media bin\n target_bin = swf_bin\n try:\n width = subprocess.check_output(['swfdump', '-X',\n bug_filepath]).split(' ')[-1].strip()\n height = subprocess.check_output(['swfdump', '-Y',\n bug_filepath]).split(' ')[-1].strip()\n except subprocess.CalledProcessError :\n LOG.exception(\"swfdump error on file %s\" % bug_filepath)\n else:\n # choose from the img media bin\n target_bin = img_bin\n LOG.debug(bug_filepath)\n try:\n height = subprocess.check_output(['identify', '-format', '\"%h\"',\\\n bug_filepath]).strip()\n width = subprocess.check_output(['identify', '-format','\"%w\"',\\\n bug_filepath]).strip()\n except subprocess.CalledProcessError:\n LOG.exception(\"identify error on file %s\" % bug_filepath)\n\n try:\n bug.set_dimension(height, width)\n dimension = '%s-%s' % (height, width)\n # check all the images in the bin with the dimensions\n m_list = target_bin[dimension]\n dup = None\n for m in m_list:\n if check_duplicate(bug_filepath, m.get_filepath()): \n dup = m\n break\n if dup:\n # check if the duplicate ad came from a different test site\n if test_site in ads[dup]:\n ads[dup][test_site] += bug_count\n else :\n ads[dup] = {test_site : bug_count}\n # delete old bug reference, add new one and point to duplicated\n # bug\n del bug_dict[bug]\n bug_dict[dup] = bug_count\n\n else: \n target_bin[dimension].append(bug)\n ads[bug] = {test_site : bug_count}\n # tally up the results\n except KeyError: # The bin hasn't been created\n target_bin[dimension] = [bug]\n ads[bug] = {test_site : bug_count}\n # else:\n # notads\n\n return ads,error_bugs\n\n\ndef export_uniq_ads(ads, out_folder, rel_folder):\n \"\"\"\n Takes all the uniq ads seen in this session and writes its metadata\n information to a csv file\n \"\"\"\n try :\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n # Relative location = Location of the ad within this 
current session\n # Global location, added when an ad is matched with existing ads in DB\n fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n \n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(new_uuidname,\n name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads\n\ndef write_run_info(RUNINFO_DIR, session_date):\n # write to a file in runinfo_dir to tell automation script this run is done\n fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)\n with open(fp, 'w') as fwtr:\n fwtr.write('OK')\n\ndef write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,\n test_sites, num_of_refresh, export_folder):\n train_category = training_sites.keys()[0]\n train_sites_to_visit = training_sites[train_category]\n with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:\n fwtr.write('session_str : %s\\n' % session_date) \n fwtr.write('machine_info : %s\\n' % machineid)\n fwtr.write('vmid : %s\\n' % vmid)\n fwtr.write('profile : %s\\n' % profile)\n fwtr.write('train_mode : %s\\n' % train_mode)\n fwtr.write('num_of_refresh : %d\\n' % num_of_refresh)\n fwtr.write('training_topic : %s\\n' % train_category)\n fwtr.write('training_sites : ')\n for site in train_sites_to_visit:\n fwtr.write('%s, ' % site)\n fwtr.write('\\nnum_of_train_sites : %d\\n' % len(train_sites_to_visit))\n fwtr.write('test_sites : ')\n for site in test_sites: \n fwtr.write('%s, ' % site[1])\n fwtr.write('\\nnum_of_test_sites : %d\\n' % len(test_sites))\n\n\ndef generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):\n \"\"\"\n Generates stats on\n - uniq ads seen on the test sites\n - total number of ads seen on the test sites\n - total number of ads seen on all test sites\n - total number of uniq ads seen on all test sites\n \"\"\"\n try:\n os.makedirs(export_folder)\n except OSError:\n pass\n\n # to be read and inserted into db\n totalads = 0 # total number of ads seen during this session\n totaluniqads = len(ads) # does not support multicategories at this point\n\n # for each category, for each test site, count total number of ads seen\n totalad_category = {} \n # for each category, for each test site, count total number of uniq ads seen\n uniqad_category = {}\n \n with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:\n bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\\\n Site-Context, BugCount, BugSrc\\n')\n for train_category, cat_dict in results.items():\n totalad_category[train_category] = {}\n uniqad_category[train_category] = {}\n for test_site, bug_dict_list in cat_dict.items():\n total_ads = 0 # for each site\n uniq_ads = [] # for each site\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if bug.is_ad():\n uuid = bug.get_uuid()\n bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(uuid, test_site,\n refresh_num, train_category, 'N/A', bugcount, bug.get_src()))\n total_ads += bugcount\n if bug not in uniq_ads:\n uniq_ads.append(bug)\n 
totalad_category[train_category][test_site] = total_ads\n uniqad_category[train_category][test_site] = len(uniq_ads)\n totalads += total_ads # global count for total ads\n\n with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:\n # write some metadata information about this session\n ses_wtr.write('#VMID: %s\\n' % vmid)\n ses_wtr.write('#Session-Date: %s\\n' % session_date)\n ses_wtr.write('#Time to complete: %s\\n' % process_ex_time)\n ses_wtr.write('#Training Categories: %s\\n' % str(results.keys()))\n ses_wtr.write('#Total Number of ads: %d\\n' % totalads)\n ses_wtr.write('#Total Uniq ads: %d\\n\\n' % totaluniqads)\n ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\\n')\n\n for train_category, cat_dict in results.items(): \n for test_site, bug_dict_list in cat_dict.items():\n num_of_visit = len(bug_dict_list)\n ses_wtr.write('{0}, {1}, {2}, {3}, {4}\\n'.format(train_category,\n test_site, num_of_visit, totalad_category[train_category][test_site],\n uniqad_category[train_category][test_site]))\n\n\ndef export_ads(results,out_folder):\n \"\"\"\n This function creates a csv file which contains all the unique ads seen in\n each test site (including all the refreshes)\n\n TODO update the doc\n results is a dictionary of the following\n results = { Category : Value, ... }\n value = { test_site_url : [ result1, result2, ... resultN], ... }\n resultN : { WebBug : count, ... }\n \"\"\"\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n \n export_ad_counter = 1 # assign unique number to ads for export to mturk\n #short_listed_companies = ['google adsense', 'doubleclick']\n with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:\n # write the titles\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.format(\\\n 'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\\\n 'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))\n # make sure we only add one ad\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n #TODO check bug_type in ffext\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\\\n \\n'.format(file_name, bug.get_name(), bug.get_filetype(),\n '' ,test_site, refresh_num, train_category, 'N/A', bugcount,\n bug.get_src()))\n export_ad_counter += 1\n\n\ndef get_bug_type(file_type):\n is_ad = False\n bug_type = 'text'\n if file_type.startswith('HTML') or \\\n file_type.startswith('ASCII') or \\\n file_type.startswith('UTF-8 Unicode English') or \\\n file_type.startswith('very short') :\n bug_type = 'text'\n elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')): \n bug_type = 'gif'\n elif file_type.startswith('PNG'):\n bug_type = 'png'\n is_ad = True\n elif file_type.startswith('GIF'):\n bug_type = 'gif'\n is_ad = True\n elif file_type.startswith('Macromedia Flash'):\n bug_type = 'swf'\n is_ad = True\n elif file_type.startswith('JPEG'):\n bug_type = 'jpg'\n is_ad = True\n return bug_type, is_ad\n\n\ndef parse_buginfo(entry):\n \"\"\"\n Takes the json decoded bug information and inserts it into a 
WebBug instance\n \"\"\"\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n\ndef curl_worker_legacy(args):\n output_dir = args[0]\n saved_file_name = args[1]\n path = args[2]\n bug = args[3]\n curl_result_queue = args[4]\n\n # subprocess.call(['curl', '-o', path , bug.get_src() ])\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])\n # Use the unix tool 'file' to check filetype\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n filetype, is_ad = get_bug_type(subpr_out)\n\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\\\n filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n curl_result_queue.put(bug)\n\ndef process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\\\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n\n # uses a pool of 'curl' workers\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\\\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, \\\n ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n 
cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n\n\ndef curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\\\n ack_queue):\n while True:\n try: \n task = input_queue.get()\n if len(task) == 1 and task[0] == \"STOP\":\n LOG.debug('curl_worker %d received stop' % worker_id)\n break\n except Exception:\n LOG.error('Error:')\n #LOG.debug(task)\n\n saved_file_name = task[0]\n path = task[1]\n bug = task[2]\n \n try:\n # subprocess.call(['curl', '-o', path , bug.get_src()])\n subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])\n subpr_out = subprocess.check_output(['file', '-b', path]).strip()\n except Exception as e : \n LOG.debug('Exception captured %s\\n\\n' % e)\n\n filetype, is_ad = get_bug_type(subpr_out)\n if is_ad:\n new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))\n else:\n new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\\\n filetype))\n os.rename(path, new_path)\n bug.set_is_ad(is_ad)\n bug.set_filetype(filetype)\n bug.set_filepath(new_path)\n worker_output_queue.put(bug)\n ack_queue.put(worker_id)\n return \n\n\ndef build_nodes(jsonData):\n \"\"\"\n This function takes a JSON encoded output of the firefox addon and builds a\n call graph for the javascript/HTML redirections\n\n @rtype nodes: dict\n @return: A graph of redirection chains\n \"\"\"\n nodes = {}\n\n def _process_cookiestr(cookieStr):\n \"\"\"\n parses a dictionary of req/resp calls to extract the cookie information\n returns a list of cookies set on this domain\n \"\"\"\n cookie_list = []\n # parses cookie str if a cookie has been set\n for cookie in cookieStr.split('\\n'):\n c = {}\n for cook in cookie.split(';'):\n token = cook.split('=', 1)\n if len(token) < 2: \n # usually this is just a flag e.g HTTPOnly, HTTPSOnly\n continue\n c[token[0]] = token[1]\n cookie_list.append(c)\n return cookie_list \n \n def _check_node(d):\n try:\n domain_node = nodes[d]\n except KeyError:\n isBug, bug_name, bug_type = ADREGEX.search(domain)\n domain_node = WebNode(domain, isBug, bug_name, bug_type)\n nodes[d] = domain_node\n return domain_node \n \n #jsonData contains all the domains and all the req/resp pairs made to them\n #iterating over the domains first\n for domain, dval in jsonData.items():\n # but first check if a node for this domain has been created or not\n domain_node = _check_node(domain)\n cookie_list = []\n # iterating thru all the req/resp pairs on a domain\n for info in dval:\n domainPath = info['domainPath']\n referrerPath = info['referrerPath']\n referrer = info['referrer']\n cookieBool = info['cookie'] \n \n parsed_cookie = None \n if cookieBool:\n cookieStr = info['cookiestr']\n parsed_cookie = _process_cookiestr(cookieStr)\n cookie_list.append(parsed_cookie)\n domain_node.add_reqresp({'domainPath' : domainPath,\n 'referrer' : referrer,\n 'referrerPath' : referrerPath,\n 'cookieList' : parsed_cookie\n })\n # making sure that we also create the node for the referrer\n referrer_node = _check_node(referrer)\n referrer_node.add_child(domain_node)\n domain_node.add_parent(referrer_node)\n domain_node.set_cookies(cookie_list)\n return nodes\n\n\ndef filter_results(extQueue, timeout_value, url):\n \"\"\"\n This function takes the JSON output of the firefox addon, and matches the\n request URL against a list of known tracker/ads regexes. 
\n\n Returns data structure containing request/resp info\n Returns None if did not receive results from FF addon\n \"\"\"\n from Queue import Empty\n try:\n LOG.debug('Timeout value in filter_result :%d' % timeout_value)\n nodes = extQueue.get(True, timeout=timeout_value)\n \n except Empty as e:\n LOG.info('Did not receive any results from FF plugin for %s' % url)\n nodes = None\n finally:\n while not extQueue.empty():\n extQueue.get()\n return nodes\n\ndef process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n \"\"\"\n This function goes through all the bugs identified by the firefox plugin and\n aggregates each bug's occurence in a given page. The aggregation is necessary\n for duplicate ads on the same page\n \"\"\"\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return\n\n\n \n\n",
"step-ids": [
10,
14,
15,
16,
18
]
}
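# get_bug_type in the module above classifies each downloaded object from the
# output of the Unix `file -b` command; PNG/GIF/SWF/JPEG payloads are flagged
# as candidate ads, while 1x1 GIFs are treated as tracking pixels rather than
# ads. A minimal sketch of that mapping ('ad_grabber' is a hypothetical import
# name for the module above, not confirmed by the source):
from ad_grabber import get_bug_type  # hypothetical module name

for file_output in [
        'HTML document, ASCII text',
        'GIF image data, version 89a, 1 x 1',
        'Macromedia Flash data (compressed), version 9',
        'JPEG image data, JFIF standard 1.01']:
    bug_type, is_ad = get_bug_type(file_output)
    print('%-45s -> %s (ad=%s)' % (file_output, bug_type, is_ad))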
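# process_results above fans downloads out to curl_worker processes over a
# shared input Queue and shuts them down by sending one ('STOP',) sentinel per
# worker, then draining an ack queue. A minimal, self-contained sketch of that
# shutdown pattern (the toy task and all names here are illustrative, not from
# the module above):
from multiprocessing import Process, Queue

def toy_worker(inq, outq, worker_id):
    # consume tasks until the STOP sentinel arrives, mirroring curl_worker
    while True:
        task = inq.get()
        if len(task) == 1 and task[0] == 'STOP':
            break
        outq.put((worker_id, task[0] * 2))  # toy payload, not a real download

if __name__ == '__main__':
    inq, outq = Queue(), Queue()
    workers = [Process(target=toy_worker, args=(inq, outq, i)) for i in range(2)]
    for p in workers:
        p.start()
    for n in range(4):
        inq.put((n,))
    for _ in workers:
        inq.put(('STOP',))  # one sentinel per worker, as process_results does
    for p in workers:
        p.join()
    print([outq.get() for _ in range(4)])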
<|reserved_special_token_0|>
class BotModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(BotModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)
self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in
range(0, rel_count)])
def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):
inp = torch.cat([bot_word_vec, ent_vec, memory, target])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)
return outp, prob
class Model(nn.Module):
def __init__(self, lr, dim, statedim, wv, rel_count):
super(Model, self).__init__()
self.dim = dim
self.statedim = statedim
self.rel_count = rel_count
self.topModel = TopModel(dim, statedim, rel_count)
self.botModel = BotModel(dim, statedim, rel_count)
wvTensor = torch.FloatTensor(wv)
self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))
self.wordvector.weight = nn.Parameter(wvTensor)
self.relationvector = nn.Embedding(rel_count + 1, dim)
self.entitytypevector = nn.Embedding(7, dim)
self.preLSTML = nn.LSTMCell(dim, dim)
self.preLSTMR = nn.LSTMCell(dim, dim)
self.top2target = nn.Linear(statedim, statedim)
self.top2bot = nn.Linear(statedim, statedim)
self.bot2top = nn.Linear(statedim, statedim)
def sample(self, prob, training, preoptions, position):
if not training:
return torch.max(prob, 0)[1]
elif preoptions is not None:
return autograd.Variable(torch.cuda.LongTensor(1).fill_(
preoptions[position]))
else:
return torch.multinomial(prob, 1)
def forward(self, mode, text, preoptions=None, preactions=None):
textin = torch.cuda.LongTensor(text)
wvs = self.wordvector(autograd.Variable(textin))
top_action, top_actprob = [], []
bot_action, bot_actprob = [], []
training = True if 'test' not in mode else False
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
front, back = [(0) for i in range(len(text))], [(0) for i in range(
len(text))]
for x in range(len(text)):
prehid, prec = self.preLSTML(wvs[x], (prehid, prec))
front[x] = prehid
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
for x in range(len(text))[::-1]:
prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))
back[x] = prehid
wordin = []
for x in range(len(text)):
wordin.append(torch.cat([front[x], back[x]]))
mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))
action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
for x in range(len(text)):
mem, prob = self.topModel(wordin[x], self.relationvector(
rel_action)[0], mem, training)
action = self.sample(prob, training, preoptions, x)
if action.data[0] != 0:
rel_action = action
actprob = prob[action]
top_action.append(action.cpu().data[0])
if not training:
top_actprob.append(actprob.cpu().data[0])
else:
top_actprob.append(actprob)
if 'NER' in mode and action.data[0] > 0:
rel = action.data[0]
target = self.top2target(mem)
actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
actions, actprobs = [], []
mem = self.top2bot(mem)
for y in range(len(text)):
mem, probb = self.botModel(self.entitytypevector(
actionb)[0], wordin[y], mem, rel, target, training)
actionb = self.sample(probb, training, preactions[x] if
preactions is not None else None, y)
actprobb = probb[actionb]
actions.append(actionb.cpu().data[0])
if not training:
actprobs.append(actprobb.cpu().data[0])
else:
actprobs.append(actprobb)
mem = self.bot2top(mem)
bot_action.append(actions)
bot_actprob.append(actprobs)
return top_action, top_actprob, bot_action, bot_actprob
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TopModel(nn.Module):
<|reserved_special_token_0|>
def forward(self, top_word_vec, rel_vec, memory, training):
inp = torch.cat([top_word_vec, rel_vec, memory])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2prob(outp), dim=0)
return outp, prob
class BotModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(BotModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)
self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in
range(0, rel_count)])
def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):
inp = torch.cat([bot_word_vec, ent_vec, memory, target])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)
return outp, prob
class Model(nn.Module):
def __init__(self, lr, dim, statedim, wv, rel_count):
super(Model, self).__init__()
self.dim = dim
self.statedim = statedim
self.rel_count = rel_count
self.topModel = TopModel(dim, statedim, rel_count)
self.botModel = BotModel(dim, statedim, rel_count)
wvTensor = torch.FloatTensor(wv)
self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))
self.wordvector.weight = nn.Parameter(wvTensor)
self.relationvector = nn.Embedding(rel_count + 1, dim)
self.entitytypevector = nn.Embedding(7, dim)
self.preLSTML = nn.LSTMCell(dim, dim)
self.preLSTMR = nn.LSTMCell(dim, dim)
self.top2target = nn.Linear(statedim, statedim)
self.top2bot = nn.Linear(statedim, statedim)
self.bot2top = nn.Linear(statedim, statedim)
def sample(self, prob, training, preoptions, position):
if not training:
return torch.max(prob, 0)[1]
elif preoptions is not None:
return autograd.Variable(torch.cuda.LongTensor(1).fill_(
preoptions[position]))
else:
return torch.multinomial(prob, 1)
def forward(self, mode, text, preoptions=None, preactions=None):
textin = torch.cuda.LongTensor(text)
wvs = self.wordvector(autograd.Variable(textin))
top_action, top_actprob = [], []
bot_action, bot_actprob = [], []
training = True if 'test' not in mode else False
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
front, back = [(0) for i in range(len(text))], [(0) for i in range(
len(text))]
for x in range(len(text)):
prehid, prec = self.preLSTML(wvs[x], (prehid, prec))
front[x] = prehid
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
for x in range(len(text))[::-1]:
prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))
back[x] = prehid
wordin = []
for x in range(len(text)):
wordin.append(torch.cat([front[x], back[x]]))
mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))
action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
for x in range(len(text)):
mem, prob = self.topModel(wordin[x], self.relationvector(
rel_action)[0], mem, training)
action = self.sample(prob, training, preoptions, x)
if action.data[0] != 0:
rel_action = action
actprob = prob[action]
top_action.append(action.cpu().data[0])
if not training:
top_actprob.append(actprob.cpu().data[0])
else:
top_actprob.append(actprob)
if 'NER' in mode and action.data[0] > 0:
rel = action.data[0]
target = self.top2target(mem)
actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
actions, actprobs = [], []
mem = self.top2bot(mem)
for y in range(len(text)):
mem, probb = self.botModel(self.entitytypevector(
actionb)[0], wordin[y], mem, rel, target, training)
actionb = self.sample(probb, training, preactions[x] if
preactions is not None else None, y)
actprobb = probb[actionb]
actions.append(actionb.cpu().data[0])
if not training:
actprobs.append(actprobb.cpu().data[0])
else:
actprobs.append(actprobb)
mem = self.bot2top(mem)
bot_action.append(actions)
bot_actprob.append(actprobs)
return top_action, top_actprob, bot_action, bot_actprob
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TopModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(TopModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim * 3 + statedim, statedim)
self.state2prob = nn.Linear(statedim, rel_count + 1)
def forward(self, top_word_vec, rel_vec, memory, training):
inp = torch.cat([top_word_vec, rel_vec, memory])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2prob(outp), dim=0)
return outp, prob
class BotModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(BotModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)
self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in
range(0, rel_count)])
def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):
inp = torch.cat([bot_word_vec, ent_vec, memory, target])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)
return outp, prob
class Model(nn.Module):
def __init__(self, lr, dim, statedim, wv, rel_count):
super(Model, self).__init__()
self.dim = dim
self.statedim = statedim
self.rel_count = rel_count
self.topModel = TopModel(dim, statedim, rel_count)
self.botModel = BotModel(dim, statedim, rel_count)
wvTensor = torch.FloatTensor(wv)
self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))
self.wordvector.weight = nn.Parameter(wvTensor)
self.relationvector = nn.Embedding(rel_count + 1, dim)
self.entitytypevector = nn.Embedding(7, dim)
self.preLSTML = nn.LSTMCell(dim, dim)
self.preLSTMR = nn.LSTMCell(dim, dim)
self.top2target = nn.Linear(statedim, statedim)
self.top2bot = nn.Linear(statedim, statedim)
self.bot2top = nn.Linear(statedim, statedim)
def sample(self, prob, training, preoptions, position):
if not training:
return torch.max(prob, 0)[1]
elif preoptions is not None:
return autograd.Variable(torch.cuda.LongTensor(1).fill_(
preoptions[position]))
else:
return torch.multinomial(prob, 1)
def forward(self, mode, text, preoptions=None, preactions=None):
textin = torch.cuda.LongTensor(text)
wvs = self.wordvector(autograd.Variable(textin))
top_action, top_actprob = [], []
bot_action, bot_actprob = [], []
        training = 'test' not in mode
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
        front, back = [0] * len(text), [0] * len(text)
for x in range(len(text)):
prehid, prec = self.preLSTML(wvs[x], (prehid, prec))
front[x] = prehid
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
        for x in reversed(range(len(text))):
prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))
back[x] = prehid
wordin = []
for x in range(len(text)):
wordin.append(torch.cat([front[x], back[x]]))
mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))
action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
for x in range(len(text)):
mem, prob = self.topModel(wordin[x], self.relationvector(
rel_action)[0], mem, training)
action = self.sample(prob, training, preoptions, x)
if action.data[0] != 0:
rel_action = action
actprob = prob[action]
top_action.append(action.cpu().data[0])
if not training:
top_actprob.append(actprob.cpu().data[0])
else:
top_actprob.append(actprob)
if 'NER' in mode and action.data[0] > 0:
rel = action.data[0]
target = self.top2target(mem)
actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
actions, actprobs = [], []
mem = self.top2bot(mem)
for y in range(len(text)):
mem, probb = self.botModel(self.entitytypevector(
actionb)[0], wordin[y], mem, rel, target, training)
actionb = self.sample(probb, training, preactions[x] if
preactions is not None else None, y)
actprobb = probb[actionb]
actions.append(actionb.cpu().data[0])
if not training:
actprobs.append(actprobb.cpu().data[0])
else:
actprobs.append(actprobb)
mem = self.bot2top(mem)
bot_action.append(actions)
bot_actprob.append(actprobs)
return top_action, top_actprob, bot_action, bot_actprob
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
class TopModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(TopModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim * 3 + statedim, statedim)
self.state2prob = nn.Linear(statedim, rel_count + 1)
def forward(self, top_word_vec, rel_vec, memory, training):
inp = torch.cat([top_word_vec, rel_vec, memory])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2prob(outp), dim=0)
return outp, prob
class BotModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(BotModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)
self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in
range(0, rel_count)])
def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):
inp = torch.cat([bot_word_vec, ent_vec, memory, target])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)
return outp, prob
class Model(nn.Module):
def __init__(self, lr, dim, statedim, wv, rel_count):
super(Model, self).__init__()
self.dim = dim
self.statedim = statedim
self.rel_count = rel_count
self.topModel = TopModel(dim, statedim, rel_count)
self.botModel = BotModel(dim, statedim, rel_count)
wvTensor = torch.FloatTensor(wv)
self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))
self.wordvector.weight = nn.Parameter(wvTensor)
self.relationvector = nn.Embedding(rel_count + 1, dim)
self.entitytypevector = nn.Embedding(7, dim)
self.preLSTML = nn.LSTMCell(dim, dim)
self.preLSTMR = nn.LSTMCell(dim, dim)
self.top2target = nn.Linear(statedim, statedim)
self.top2bot = nn.Linear(statedim, statedim)
self.bot2top = nn.Linear(statedim, statedim)
def sample(self, prob, training, preoptions, position):
if not training:
return torch.max(prob, 0)[1]
elif preoptions is not None:
return autograd.Variable(torch.cuda.LongTensor(1).fill_(
preoptions[position]))
else:
return torch.multinomial(prob, 1)
def forward(self, mode, text, preoptions=None, preactions=None):
textin = torch.cuda.LongTensor(text)
wvs = self.wordvector(autograd.Variable(textin))
top_action, top_actprob = [], []
bot_action, bot_actprob = [], []
        training = 'test' not in mode
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
        front, back = [0] * len(text), [0] * len(text)
for x in range(len(text)):
prehid, prec = self.preLSTML(wvs[x], (prehid, prec))
front[x] = prehid
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))
        for x in reversed(range(len(text))):
prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))
back[x] = prehid
wordin = []
for x in range(len(text)):
wordin.append(torch.cat([front[x], back[x]]))
mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))
action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
for x in range(len(text)):
mem, prob = self.topModel(wordin[x], self.relationvector(
rel_action)[0], mem, training)
action = self.sample(prob, training, preoptions, x)
if action.data[0] != 0:
rel_action = action
actprob = prob[action]
top_action.append(action.cpu().data[0])
if not training:
top_actprob.append(actprob.cpu().data[0])
else:
top_actprob.append(actprob)
if 'NER' in mode and action.data[0] > 0:
rel = action.data[0]
target = self.top2target(mem)
actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))
actions, actprobs = [], []
mem = self.top2bot(mem)
for y in range(len(text)):
mem, probb = self.botModel(self.entitytypevector(
actionb)[0], wordin[y], mem, rel, target, training)
actionb = self.sample(probb, training, preactions[x] if
preactions is not None else None, y)
actprobb = probb[actionb]
actions.append(actionb.cpu().data[0])
if not training:
actprobs.append(actprobb.cpu().data[0])
else:
actprobs.append(actprobb)
mem = self.bot2top(mem)
bot_action.append(actions)
bot_actprob.append(actprobs)
return top_action, top_actprob, bot_action, bot_actprob
<|reserved_special_token_1|>
############################################################
# Hierarchical Reinforcement Learning for Relation Extraction
# Multiprocessing with CUDA
# Require: PyTorch 0.3.0
# Author: Tianyang Zhang, Ryuichi Takanobu
# E-mail: [email protected], [email protected]
############################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
class TopModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(TopModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim*3 + statedim, statedim)
self.state2prob = nn.Linear(statedim, rel_count+1)
def forward(self, top_word_vec, rel_vec, memory, training):
inp = torch.cat([top_word_vec, rel_vec, memory])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2prob(outp), dim=0)
return outp, prob
class BotModel(nn.Module):
def __init__(self, dim, statedim, rel_count):
super(BotModel, self).__init__()
self.dim = dim
self.hid2state = nn.Linear(dim*3 + statedim*2, statedim)
self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in range(0, rel_count)])
def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):
inp = torch.cat([bot_word_vec, ent_vec, memory, target])
outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)
prob = F.softmax(self.state2probL[rel-1](outp), dim=0)
return outp, prob
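# Shape bookkeeping (explanatory note, not part of the original file):
# wordin[x] = cat(front[x], back[x]) is 2*dim wide, so TopModel's input is
# 2*dim (word) + dim (relation embedding) + statedim (memory) = dim*3 + statedim,
# matching its hid2state layer. BotModel additionally receives the target
# vector, giving dim*3 + statedim*2. E.g. with dim=100, statedim=200:
#   TopModel.hid2state = Linear(500, 200); BotModel.hid2state = Linear(700, 200).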
class Model(nn.Module):
def __init__(self, lr, dim, statedim, wv, rel_count):
super(Model, self).__init__()
self.dim = dim
self.statedim = statedim
self.rel_count = rel_count
self.topModel = TopModel(dim, statedim, rel_count)
self.botModel = BotModel(dim, statedim, rel_count)
wvTensor = torch.FloatTensor(wv)
self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))
self.wordvector.weight = nn.Parameter(wvTensor)
self.relationvector = nn.Embedding(rel_count+1, dim)
self.entitytypevector = nn.Embedding(7, dim)
self.preLSTML = nn.LSTMCell(dim, dim)
self.preLSTMR = nn.LSTMCell(dim, dim)
self.top2target = nn.Linear(statedim, statedim)
self.top2bot = nn.Linear(statedim, statedim)
self.bot2top = nn.Linear(statedim, statedim)
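    # Note: the `lr` constructor argument is accepted but never stored or used
    # inside this module; presumably the learning rate is consumed by an
    # external optimizer set up in the training script.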
def sample(self, prob, training, preoptions, position):
if not training:
return torch.max(prob, 0)[1]
elif preoptions is not None:
return autograd.Variable(torch.cuda.LongTensor(1, ).fill_(preoptions[position]))
else:
return torch.multinomial(prob, 1)
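    # sample() selects an action three ways: greedy argmax at test time,
    # teacher forcing from a supplied preoptions/preactions trajectory, and
    # stochastic sampling from the current policy distribution otherwise.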
def forward(self, mode, text, preoptions=None, preactions=None):
textin = torch.cuda.LongTensor(text)
wvs = self.wordvector(autograd.Variable(textin))
top_action, top_actprob = [], []
bot_action, bot_actprob = [], []
        training = "test" not in mode
#-----------------------------------------------------------------
# Prepare
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))
        front, back = [0] * len(text), [0] * len(text)
for x in range(len(text)):
prehid, prec = self.preLSTML(wvs[x], (prehid, prec))
front[x] = prehid
prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))
prec = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))
        for x in reversed(range(len(text))):
prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))
back[x] = prehid
wordin = []
for x in range(len(text)):
wordin.append(torch.cat([front[x], back[x]]))
#------------------------------------------------------------------
# First Layer
mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim, ).fill_(0))
action = autograd.Variable(torch.cuda.LongTensor(1, ).fill_(0))
rel_action = autograd.Variable(torch.cuda.LongTensor(1, ).fill_(0))
for x in range(len(text)):
mem, prob = self.topModel(wordin[x],\
self.relationvector(rel_action)[0], mem, training)
action = self.sample(prob, training, preoptions, x)
if action.data[0] != 0:
rel_action = action
actprob = prob[action]
top_action.append(action.cpu().data[0])
if not training:
top_actprob.append(actprob.cpu().data[0])
else:
top_actprob.append(actprob)
#----------------------------------------------------------------
# Second Layer
if "NER" in mode and action.data[0] > 0:
rel = action.data[0]
target = self.top2target(mem)
actionb = autograd.Variable(torch.cuda.LongTensor(1, ).fill_(0))
actions, actprobs = [], []
mem = self.top2bot(mem)
for y in range(len(text)):
mem, probb = self.botModel(\
self.entitytypevector(actionb)[0], wordin[y], \
mem, rel, target, training)
actionb = self.sample(probb, training, preactions[x] if preactions is not None else None, y)
actprobb = probb[actionb]
actions.append(actionb.cpu().data[0])
if not training:
actprobs.append(actprobb.cpu().data[0])
else:
actprobs.append(actprobb)
mem = self.bot2top(mem)
bot_action.append(actions)
bot_actprob.append(actprobs)
return top_action, top_actprob, bot_action, bot_actprob
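# Minimal usage sketch (not part of the original file; assumes a CUDA device,
# PyTorch 0.3.0, and the hypothetical sizes below):
#
#   import numpy as np
#   wv = np.random.randn(5000, 100)                    # vocab of 5000, dim=100
#   model = Model(lr=0.001, dim=100, statedim=200, wv=wv, rel_count=10).cuda()
#   text = [1, 17, 42, 7]                              # token ids, one sentence
#   top_a, top_p, bot_a, bot_p = model("train NER", text)
#   # top_a holds one relation option per word; bot_a holds an entity-tag
#   # sequence for every word that triggered the low-level NER policy.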
|
flexible
|
{
"blob_id": "699410536c9a195024c5abbcccc88c17e8e095e3",
"index": 6003,
"step-1": "<mask token>\n\n\nclass BotModel(nn.Module):\n\n def __init__(self, dim, statedim, rel_count):\n super(BotModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)\n self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in\n range(0, rel_count)])\n\n def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):\n inp = torch.cat([bot_word_vec, ent_vec, memory, target])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)\n return outp, prob\n\n\nclass Model(nn.Module):\n\n def __init__(self, lr, dim, statedim, wv, rel_count):\n super(Model, self).__init__()\n self.dim = dim\n self.statedim = statedim\n self.rel_count = rel_count\n self.topModel = TopModel(dim, statedim, rel_count)\n self.botModel = BotModel(dim, statedim, rel_count)\n wvTensor = torch.FloatTensor(wv)\n self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))\n self.wordvector.weight = nn.Parameter(wvTensor)\n self.relationvector = nn.Embedding(rel_count + 1, dim)\n self.entitytypevector = nn.Embedding(7, dim)\n self.preLSTML = nn.LSTMCell(dim, dim)\n self.preLSTMR = nn.LSTMCell(dim, dim)\n self.top2target = nn.Linear(statedim, statedim)\n self.top2bot = nn.Linear(statedim, statedim)\n self.bot2top = nn.Linear(statedim, statedim)\n\n def sample(self, prob, training, preoptions, position):\n if not training:\n return torch.max(prob, 0)[1]\n elif preoptions is not None:\n return autograd.Variable(torch.cuda.LongTensor(1).fill_(\n preoptions[position]))\n else:\n return torch.multinomial(prob, 1)\n\n def forward(self, mode, text, preoptions=None, preactions=None):\n textin = torch.cuda.LongTensor(text)\n wvs = self.wordvector(autograd.Variable(textin))\n top_action, top_actprob = [], []\n bot_action, bot_actprob = [], []\n training = True if 'test' not in mode else False\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n front, back = [(0) for i in range(len(text))], [(0) for i in range(\n len(text))]\n for x in range(len(text)):\n prehid, prec = self.preLSTML(wvs[x], (prehid, prec))\n front[x] = prehid\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n for x in range(len(text))[::-1]:\n prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))\n back[x] = prehid\n wordin = []\n for x in range(len(text)):\n wordin.append(torch.cat([front[x], back[x]]))\n mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))\n action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n for x in range(len(text)):\n mem, prob = self.topModel(wordin[x], self.relationvector(\n rel_action)[0], mem, training)\n action = self.sample(prob, training, preoptions, x)\n if action.data[0] != 0:\n rel_action = action\n actprob = prob[action]\n top_action.append(action.cpu().data[0])\n if not training:\n top_actprob.append(actprob.cpu().data[0])\n else:\n top_actprob.append(actprob)\n if 'NER' in mode and action.data[0] > 0:\n rel = action.data[0]\n target = self.top2target(mem)\n actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n actions, actprobs = [], []\n mem = self.top2bot(mem)\n for y in range(len(text)):\n mem, probb = self.botModel(self.entitytypevector(\n actionb)[0], wordin[y], mem, rel, target, training)\n 
actionb = self.sample(probb, training, preactions[x] if\n preactions is not None else None, y)\n actprobb = probb[actionb]\n actions.append(actionb.cpu().data[0])\n if not training:\n actprobs.append(actprobb.cpu().data[0])\n else:\n actprobs.append(actprobb)\n mem = self.bot2top(mem)\n bot_action.append(actions)\n bot_actprob.append(actprobs)\n return top_action, top_actprob, bot_action, bot_actprob\n",
"step-2": "<mask token>\n\n\nclass TopModel(nn.Module):\n <mask token>\n\n def forward(self, top_word_vec, rel_vec, memory, training):\n inp = torch.cat([top_word_vec, rel_vec, memory])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2prob(outp), dim=0)\n return outp, prob\n\n\nclass BotModel(nn.Module):\n\n def __init__(self, dim, statedim, rel_count):\n super(BotModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)\n self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in\n range(0, rel_count)])\n\n def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):\n inp = torch.cat([bot_word_vec, ent_vec, memory, target])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)\n return outp, prob\n\n\nclass Model(nn.Module):\n\n def __init__(self, lr, dim, statedim, wv, rel_count):\n super(Model, self).__init__()\n self.dim = dim\n self.statedim = statedim\n self.rel_count = rel_count\n self.topModel = TopModel(dim, statedim, rel_count)\n self.botModel = BotModel(dim, statedim, rel_count)\n wvTensor = torch.FloatTensor(wv)\n self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))\n self.wordvector.weight = nn.Parameter(wvTensor)\n self.relationvector = nn.Embedding(rel_count + 1, dim)\n self.entitytypevector = nn.Embedding(7, dim)\n self.preLSTML = nn.LSTMCell(dim, dim)\n self.preLSTMR = nn.LSTMCell(dim, dim)\n self.top2target = nn.Linear(statedim, statedim)\n self.top2bot = nn.Linear(statedim, statedim)\n self.bot2top = nn.Linear(statedim, statedim)\n\n def sample(self, prob, training, preoptions, position):\n if not training:\n return torch.max(prob, 0)[1]\n elif preoptions is not None:\n return autograd.Variable(torch.cuda.LongTensor(1).fill_(\n preoptions[position]))\n else:\n return torch.multinomial(prob, 1)\n\n def forward(self, mode, text, preoptions=None, preactions=None):\n textin = torch.cuda.LongTensor(text)\n wvs = self.wordvector(autograd.Variable(textin))\n top_action, top_actprob = [], []\n bot_action, bot_actprob = [], []\n training = True if 'test' not in mode else False\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n front, back = [(0) for i in range(len(text))], [(0) for i in range(\n len(text))]\n for x in range(len(text)):\n prehid, prec = self.preLSTML(wvs[x], (prehid, prec))\n front[x] = prehid\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n for x in range(len(text))[::-1]:\n prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))\n back[x] = prehid\n wordin = []\n for x in range(len(text)):\n wordin.append(torch.cat([front[x], back[x]]))\n mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))\n action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n for x in range(len(text)):\n mem, prob = self.topModel(wordin[x], self.relationvector(\n rel_action)[0], mem, training)\n action = self.sample(prob, training, preoptions, x)\n if action.data[0] != 0:\n rel_action = action\n actprob = prob[action]\n top_action.append(action.cpu().data[0])\n if not training:\n top_actprob.append(actprob.cpu().data[0])\n else:\n top_actprob.append(actprob)\n if 'NER' in mode and action.data[0] > 0:\n rel = 
action.data[0]\n target = self.top2target(mem)\n actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n actions, actprobs = [], []\n mem = self.top2bot(mem)\n for y in range(len(text)):\n mem, probb = self.botModel(self.entitytypevector(\n actionb)[0], wordin[y], mem, rel, target, training)\n actionb = self.sample(probb, training, preactions[x] if\n preactions is not None else None, y)\n actprobb = probb[actionb]\n actions.append(actionb.cpu().data[0])\n if not training:\n actprobs.append(actprobb.cpu().data[0])\n else:\n actprobs.append(actprobb)\n mem = self.bot2top(mem)\n bot_action.append(actions)\n bot_actprob.append(actprobs)\n return top_action, top_actprob, bot_action, bot_actprob\n",
"step-3": "<mask token>\n\n\nclass TopModel(nn.Module):\n\n def __init__(self, dim, statedim, rel_count):\n super(TopModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim * 3 + statedim, statedim)\n self.state2prob = nn.Linear(statedim, rel_count + 1)\n\n def forward(self, top_word_vec, rel_vec, memory, training):\n inp = torch.cat([top_word_vec, rel_vec, memory])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2prob(outp), dim=0)\n return outp, prob\n\n\nclass BotModel(nn.Module):\n\n def __init__(self, dim, statedim, rel_count):\n super(BotModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)\n self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in\n range(0, rel_count)])\n\n def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):\n inp = torch.cat([bot_word_vec, ent_vec, memory, target])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)\n return outp, prob\n\n\nclass Model(nn.Module):\n\n def __init__(self, lr, dim, statedim, wv, rel_count):\n super(Model, self).__init__()\n self.dim = dim\n self.statedim = statedim\n self.rel_count = rel_count\n self.topModel = TopModel(dim, statedim, rel_count)\n self.botModel = BotModel(dim, statedim, rel_count)\n wvTensor = torch.FloatTensor(wv)\n self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))\n self.wordvector.weight = nn.Parameter(wvTensor)\n self.relationvector = nn.Embedding(rel_count + 1, dim)\n self.entitytypevector = nn.Embedding(7, dim)\n self.preLSTML = nn.LSTMCell(dim, dim)\n self.preLSTMR = nn.LSTMCell(dim, dim)\n self.top2target = nn.Linear(statedim, statedim)\n self.top2bot = nn.Linear(statedim, statedim)\n self.bot2top = nn.Linear(statedim, statedim)\n\n def sample(self, prob, training, preoptions, position):\n if not training:\n return torch.max(prob, 0)[1]\n elif preoptions is not None:\n return autograd.Variable(torch.cuda.LongTensor(1).fill_(\n preoptions[position]))\n else:\n return torch.multinomial(prob, 1)\n\n def forward(self, mode, text, preoptions=None, preactions=None):\n textin = torch.cuda.LongTensor(text)\n wvs = self.wordvector(autograd.Variable(textin))\n top_action, top_actprob = [], []\n bot_action, bot_actprob = [], []\n training = True if 'test' not in mode else False\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n front, back = [(0) for i in range(len(text))], [(0) for i in range(\n len(text))]\n for x in range(len(text)):\n prehid, prec = self.preLSTML(wvs[x], (prehid, prec))\n front[x] = prehid\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n for x in range(len(text))[::-1]:\n prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))\n back[x] = prehid\n wordin = []\n for x in range(len(text)):\n wordin.append(torch.cat([front[x], back[x]]))\n mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))\n action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n for x in range(len(text)):\n mem, prob = self.topModel(wordin[x], self.relationvector(\n rel_action)[0], mem, training)\n action = self.sample(prob, training, preoptions, x)\n if action.data[0] != 0:\n rel_action = action\n actprob = prob[action]\n 
top_action.append(action.cpu().data[0])\n if not training:\n top_actprob.append(actprob.cpu().data[0])\n else:\n top_actprob.append(actprob)\n if 'NER' in mode and action.data[0] > 0:\n rel = action.data[0]\n target = self.top2target(mem)\n actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n actions, actprobs = [], []\n mem = self.top2bot(mem)\n for y in range(len(text)):\n mem, probb = self.botModel(self.entitytypevector(\n actionb)[0], wordin[y], mem, rel, target, training)\n actionb = self.sample(probb, training, preactions[x] if\n preactions is not None else None, y)\n actprobb = probb[actionb]\n actions.append(actionb.cpu().data[0])\n if not training:\n actprobs.append(actprobb.cpu().data[0])\n else:\n actprobs.append(actprobb)\n mem = self.bot2top(mem)\n bot_action.append(actions)\n bot_actprob.append(actprobs)\n return top_action, top_actprob, bot_action, bot_actprob\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\n\n\nclass TopModel(nn.Module):\n\n def __init__(self, dim, statedim, rel_count):\n super(TopModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim * 3 + statedim, statedim)\n self.state2prob = nn.Linear(statedim, rel_count + 1)\n\n def forward(self, top_word_vec, rel_vec, memory, training):\n inp = torch.cat([top_word_vec, rel_vec, memory])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2prob(outp), dim=0)\n return outp, prob\n\n\nclass BotModel(nn.Module):\n\n def __init__(self, dim, statedim, rel_count):\n super(BotModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim * 3 + statedim * 2, statedim)\n self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in\n range(0, rel_count)])\n\n def forward(self, ent_vec, bot_word_vec, memory, rel, target, training):\n inp = torch.cat([bot_word_vec, ent_vec, memory, target])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2probL[rel - 1](outp), dim=0)\n return outp, prob\n\n\nclass Model(nn.Module):\n\n def __init__(self, lr, dim, statedim, wv, rel_count):\n super(Model, self).__init__()\n self.dim = dim\n self.statedim = statedim\n self.rel_count = rel_count\n self.topModel = TopModel(dim, statedim, rel_count)\n self.botModel = BotModel(dim, statedim, rel_count)\n wvTensor = torch.FloatTensor(wv)\n self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))\n self.wordvector.weight = nn.Parameter(wvTensor)\n self.relationvector = nn.Embedding(rel_count + 1, dim)\n self.entitytypevector = nn.Embedding(7, dim)\n self.preLSTML = nn.LSTMCell(dim, dim)\n self.preLSTMR = nn.LSTMCell(dim, dim)\n self.top2target = nn.Linear(statedim, statedim)\n self.top2bot = nn.Linear(statedim, statedim)\n self.bot2top = nn.Linear(statedim, statedim)\n\n def sample(self, prob, training, preoptions, position):\n if not training:\n return torch.max(prob, 0)[1]\n elif preoptions is not None:\n return autograd.Variable(torch.cuda.LongTensor(1).fill_(\n preoptions[position]))\n else:\n return torch.multinomial(prob, 1)\n\n def forward(self, mode, text, preoptions=None, preactions=None):\n textin = torch.cuda.LongTensor(text)\n wvs = self.wordvector(autograd.Variable(textin))\n top_action, top_actprob = [], []\n bot_action, bot_actprob = [], []\n training = True if 'test' not in mode else False\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n front, back = [(0) for i in range(len(text))], [(0) for i in range(\n len(text))]\n for x in range(len(text)):\n prehid, prec = self.preLSTML(wvs[x], (prehid, prec))\n front[x] = prehid\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim).fill_(0))\n for x in range(len(text))[::-1]:\n prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))\n back[x] = prehid\n wordin = []\n for x in range(len(text)):\n wordin.append(torch.cat([front[x], back[x]]))\n mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim).fill_(0))\n action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n rel_action = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n for x in range(len(text)):\n mem, prob = self.topModel(wordin[x], self.relationvector(\n rel_action)[0], mem, training)\n action = self.sample(prob, training, 
preoptions, x)\n if action.data[0] != 0:\n rel_action = action\n actprob = prob[action]\n top_action.append(action.cpu().data[0])\n if not training:\n top_actprob.append(actprob.cpu().data[0])\n else:\n top_actprob.append(actprob)\n if 'NER' in mode and action.data[0] > 0:\n rel = action.data[0]\n target = self.top2target(mem)\n actionb = autograd.Variable(torch.cuda.LongTensor(1).fill_(0))\n actions, actprobs = [], []\n mem = self.top2bot(mem)\n for y in range(len(text)):\n mem, probb = self.botModel(self.entitytypevector(\n actionb)[0], wordin[y], mem, rel, target, training)\n actionb = self.sample(probb, training, preactions[x] if\n preactions is not None else None, y)\n actprobb = probb[actionb]\n actions.append(actionb.cpu().data[0])\n if not training:\n actprobs.append(actprobb.cpu().data[0])\n else:\n actprobs.append(actprobb)\n mem = self.bot2top(mem)\n bot_action.append(actions)\n bot_actprob.append(actprobs)\n return top_action, top_actprob, bot_action, bot_actprob\n",
"step-5": "############################################################\n# Hierarchical Reinforcement Learning for Relation Extraction\n# Multiprocessing with CUDA\n# Require: PyTorch 0.3.0\n# Author: Tianyang Zhang, Ryuichi Takanobu\n# E-mail: [email protected], [email protected]\n############################################################\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\n\nclass TopModel(nn.Module):\n def __init__(self, dim, statedim, rel_count):\n super(TopModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim*3 + statedim, statedim)\n self.state2prob = nn.Linear(statedim, rel_count+1)\n\n def forward(self, top_word_vec, rel_vec, memory, training): \n inp = torch.cat([top_word_vec, rel_vec, memory])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training) \n prob = F.softmax(self.state2prob(outp), dim=0)\n return outp, prob \n\nclass BotModel(nn.Module):\n def __init__(self, dim, statedim, rel_count):\n super(BotModel, self).__init__()\n self.dim = dim\n self.hid2state = nn.Linear(dim*3 + statedim*2, statedim)\n self.state2probL = nn.ModuleList([nn.Linear(statedim, 7) for i in range(0, rel_count)])\n\n def forward(self, ent_vec, bot_word_vec, memory, rel, target, training): \n inp = torch.cat([bot_word_vec, ent_vec, memory, target])\n outp = F.dropout(F.tanh(self.hid2state(inp)), training=training)\n prob = F.softmax(self.state2probL[rel-1](outp), dim=0)\n return outp, prob \n\nclass Model(nn.Module):\n def __init__(self, lr, dim, statedim, wv, rel_count):\n super(Model, self).__init__()\n self.dim = dim\n self.statedim = statedim\n self.rel_count = rel_count\n self.topModel = TopModel(dim, statedim, rel_count)\n self.botModel = BotModel(dim, statedim, rel_count)\n wvTensor = torch.FloatTensor(wv)\n self.wordvector = nn.Embedding(wvTensor.size(0), wvTensor.size(1))\n self.wordvector.weight = nn.Parameter(wvTensor)\n self.relationvector = nn.Embedding(rel_count+1, dim)\n self.entitytypevector = nn.Embedding(7, dim)\n self.preLSTML = nn.LSTMCell(dim, dim)\n self.preLSTMR = nn.LSTMCell(dim, dim)\n self.top2target = nn.Linear(statedim, statedim)\n self.top2bot = nn.Linear(statedim, statedim)\n self.bot2top = nn.Linear(statedim, statedim)\n \n def sample(self, prob, training, preoptions, position):\n if not training:\n return torch.max(prob, 0)[1]\n elif preoptions is not None:\n return autograd.Variable(torch.cuda.LongTensor(1, ).fill_(preoptions[position]))\n else:\n return torch.multinomial(prob, 1)\n\n def forward(self, mode, text, preoptions=None, preactions=None):\n textin = torch.cuda.LongTensor(text)\n wvs = self.wordvector(autograd.Variable(textin))\n top_action, top_actprob = [], []\n bot_action, bot_actprob = [], [] \n training = True if \"test\" not in mode else False\n\n #-----------------------------------------------------------------\n # Prepare\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))\n front, back = [0 for i in range(len(text))], [0 for i in range(len(text))]\n for x in range(len(text)):\n prehid, prec = self.preLSTML(wvs[x], (prehid, prec))\n front[x] = prehid\n prehid = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))\n prec = autograd.Variable(torch.cuda.FloatTensor(self.dim, ).fill_(0))\n for x in range(len(text))[::-1]:\n prehid, prec = self.preLSTMR(wvs[x], (prehid, prec))\n back[x] = prehid\n wordin = []\n for x in range(len(text)):\n 
wordin.append(torch.cat([front[x], back[x]]))\n #------------------------------------------------------------------\n # First Layer\n mem = autograd.Variable(torch.cuda.FloatTensor(self.statedim, ).fill_(0))\n action = autograd.Variable(torch.cuda.LongTensor(1, ).fill_(0))\n rel_action = autograd.Variable(torch.cuda.LongTensor(1, ).fill_(0)) \n for x in range(len(text)): \n mem, prob = self.topModel(wordin[x],\\\n self.relationvector(rel_action)[0], mem, training)\n action = self.sample(prob, training, preoptions, x)\n if action.data[0] != 0: \n rel_action = action\n actprob = prob[action]\n top_action.append(action.cpu().data[0])\n if not training:\n top_actprob.append(actprob.cpu().data[0])\n else:\n top_actprob.append(actprob)\n\n #----------------------------------------------------------------\n # Second Layer\n if \"NER\" in mode and action.data[0] > 0:\n rel = action.data[0]\n target = self.top2target(mem)\n actionb = autograd.Variable(torch.cuda.LongTensor(1, ).fill_(0))\n actions, actprobs = [], []\n mem = self.top2bot(mem)\n for y in range(len(text)):\n mem, probb = self.botModel(\\\n self.entitytypevector(actionb)[0], wordin[y], \\\n mem, rel, target, training)\n actionb = self.sample(probb, training, preactions[x] if preactions is not None else None, y)\n actprobb = probb[actionb]\n actions.append(actionb.cpu().data[0])\n if not training:\n actprobs.append(actprobb.cpu().data[0]) \n else:\n actprobs.append(actprobb)\n mem = self.bot2top(mem)\n bot_action.append(actions)\n bot_actprob.append(actprobs)\n return top_action, top_actprob, bot_action, bot_actprob\n\n",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
<|reserved_special_token_0|>
def check_sig(payload, sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == 'Algorand':
print('Algorand')
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print('Algo sig verifies!')
result = True
elif platform == 'Ethereum':
print('Ethereum')
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json
)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig
) == pk:
print('Eth sig verifies!')
result = True
return result, payload_json
def fill_order():
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
if (existing_order.buy_currency == current_order.sell_currency and
existing_order.sell_currency == current_order.buy_currency and
existing_order.sell_amount / existing_order.buy_amount >=
current_order.buy_amount / current_order.sell_amount and
existing_order.counterparty_id == None):
order_list.append(existing_order)
if len(order_list) > 0:
match_order = order_list[0]
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
if current_order.sell_amount < match_order.buy_amount:
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = (match_order.sell_amount / match_order.
buy_amount)
sell_amount_new_match = diff * exchange_rate_match
new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=
match_order.receiver_pk, buy_currency=match_order.
buy_currency, sell_currency=match_order.sell_currency,
buy_amount=diff, sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print('M')
fill_order()
if current_order.buy_amount > match_order.sell_amount:
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = (current_order.buy_amount /
current_order.sell_amount)
sell_amount_new_current = diff / exchange_rate_current
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk, buy_currency=
current_order.buy_currency, sell_currency=current_order.
sell_currency, buy_amount=diff, sell_amount=
sell_amount_new_current, creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print('C')
fill_order()
<|reserved_special_token_0|>
def row2dict(row):
return {c.name: getattr(row, c.name) for c in row.__table__.columns}
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
<|reserved_special_token_0|>
@app.route('/trade', methods=['POST'])
def trade():
print('In trade endpoint')
if request.method == 'POST':
print('--------- trade ---------')
content = request.get_json(silent=True)
print(f'content = {json.dumps(content)}')
columns = ['sender_pk', 'receiver_pk', 'buy_currency',
'sell_currency', 'buy_amount', 'sell_amount', 'platform']
fields = ['sig', 'payload']
for field in fields:
if not field in content.keys():
print(f'{field} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
for column in columns:
if not column in content['payload'].keys():
print(f'{column} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
sig = content['sig']
payload = content['payload']
platform = payload['platform']
platforms = ['Algorand', 'Ethereum']
if not platform in platforms:
print('input platform is not Algorand or Ethereum')
return jsonify(False)
check_result = check_sig(payload, sig)
result = check_result[0]
payload_json = check_result[1]
if result is False:
print('signature does NOT verify')
log_message(payload_json)
return jsonify(result)
if result is True:
print('signature verifies')
create_session()
order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=
payload['receiver_pk'], buy_currency=payload['buy_currency'
], sell_currency=payload['sell_currency'], buy_amount=
payload['buy_amount'], sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
fill_order()
shutdown_session()
return jsonify(result)
@app.route('/order_book')
def order_book():
print('--------- order_book ---------')
create_session()
order_dict_list = [row2dict(order) for order in g.session.query(Order).
all()]
result = {'data': order_dict_list}
print('order book length: ')
print(len(order_dict_list))
shutdown_session()
return jsonify(result)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
<|reserved_special_token_0|>
def check_sig(payload, sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == 'Algorand':
print('Algorand')
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print('Algo sig verifies!')
result = True
elif platform == 'Ethereum':
print('Ethereum')
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json
)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig
) == pk:
print('Eth sig verifies!')
result = True
return result, payload_json
def fill_order():
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
if (existing_order.buy_currency == current_order.sell_currency and
existing_order.sell_currency == current_order.buy_currency and
existing_order.sell_amount / existing_order.buy_amount >=
current_order.buy_amount / current_order.sell_amount and
existing_order.counterparty_id == None):
order_list.append(existing_order)
if len(order_list) > 0:
match_order = order_list[0]
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
if current_order.sell_amount < match_order.buy_amount:
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = (match_order.sell_amount / match_order.
buy_amount)
sell_amount_new_match = diff * exchange_rate_match
new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=
match_order.receiver_pk, buy_currency=match_order.
buy_currency, sell_currency=match_order.sell_currency,
buy_amount=diff, sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print('M')
fill_order()
if current_order.buy_amount > match_order.sell_amount:
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = (current_order.buy_amount /
current_order.sell_amount)
sell_amount_new_current = diff / exchange_rate_current
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk, buy_currency=
current_order.buy_currency, sell_currency=current_order.
sell_currency, buy_amount=diff, sell_amount=
sell_amount_new_current, creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print('C')
fill_order()
<|reserved_special_token_0|>
def row2dict(row):
return {c.name: getattr(row, c.name) for c in row.__table__.columns}
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
<|reserved_special_token_0|>
@app.route('/trade', methods=['POST'])
def trade():
print('In trade endpoint')
if request.method == 'POST':
print('--------- trade ---------')
content = request.get_json(silent=True)
print(f'content = {json.dumps(content)}')
columns = ['sender_pk', 'receiver_pk', 'buy_currency',
'sell_currency', 'buy_amount', 'sell_amount', 'platform']
fields = ['sig', 'payload']
for field in fields:
if not field in content.keys():
print(f'{field} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
for column in columns:
if not column in content['payload'].keys():
print(f'{column} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
sig = content['sig']
payload = content['payload']
platform = payload['platform']
platforms = ['Algorand', 'Ethereum']
if not platform in platforms:
print('input platform is not Algorand or Ethereum')
return jsonify(False)
check_result = check_sig(payload, sig)
result = check_result[0]
payload_json = check_result[1]
if result is False:
print('signature does NOT verify')
log_message(payload_json)
return jsonify(result)
if result is True:
print('signature verifies')
create_session()
order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=
payload['receiver_pk'], buy_currency=payload['buy_currency'
], sell_currency=payload['sell_currency'], buy_amount=
payload['buy_amount'], sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
fill_order()
shutdown_session()
return jsonify(result)
@app.route('/order_book')
def order_book():
print('--------- order_book ---------')
create_session()
order_dict_list = [row2dict(order) for order in g.session.query(Order).
all()]
result = {'data': order_dict_list}
print('order book length: ')
print(len(order_dict_list))
shutdown_session()
return jsonify(result)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
<|reserved_special_token_0|>
def check_sig(payload, sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == 'Algorand':
print('Algorand')
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print('Algo sig verifies!')
result = True
elif platform == 'Ethereum':
print('Ethereum')
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json
)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig
) == pk:
print('Eth sig verifies!')
result = True
return result, payload_json
def fill_order():
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
if (existing_order.buy_currency == current_order.sell_currency and
existing_order.sell_currency == current_order.buy_currency and
existing_order.sell_amount / existing_order.buy_amount >=
current_order.buy_amount / current_order.sell_amount and
existing_order.counterparty_id == None):
order_list.append(existing_order)
if len(order_list) > 0:
match_order = order_list[0]
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
if current_order.sell_amount < match_order.buy_amount:
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = (match_order.sell_amount / match_order.
buy_amount)
sell_amount_new_match = diff * exchange_rate_match
new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=
match_order.receiver_pk, buy_currency=match_order.
buy_currency, sell_currency=match_order.sell_currency,
buy_amount=diff, sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print('M')
fill_order()
if current_order.buy_amount > match_order.sell_amount:
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = (current_order.buy_amount /
current_order.sell_amount)
sell_amount_new_current = diff / exchange_rate_current
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk, buy_currency=
current_order.buy_currency, sell_currency=current_order.
sell_currency, buy_amount=diff, sell_amount=
sell_amount_new_current, creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print('C')
fill_order()
def log_message(d):
    create_session()
    # store a string: d may arrive as a dict or as an already-serialized JSON payload
    order_obj = Log(message=d if isinstance(d, str) else json.dumps(d))
    g.session.add(order_obj)
    shutdown_session()
def row2dict(row):
return {c.name: getattr(row, c.name) for c in row.__table__.columns}
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
<|reserved_special_token_0|>
@app.route('/trade', methods=['POST'])
def trade():
print('In trade endpoint')
if request.method == 'POST':
print('--------- trade ---------')
content = request.get_json(silent=True)
print(f'content = {json.dumps(content)}')
columns = ['sender_pk', 'receiver_pk', 'buy_currency',
'sell_currency', 'buy_amount', 'sell_amount', 'platform']
fields = ['sig', 'payload']
for field in fields:
if not field in content.keys():
print(f'{field} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
for column in columns:
if not column in content['payload'].keys():
print(f'{column} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
sig = content['sig']
payload = content['payload']
platform = payload['platform']
platforms = ['Algorand', 'Ethereum']
if not platform in platforms:
print('input platform is not Algorand or Ethereum')
return jsonify(False)
check_result = check_sig(payload, sig)
result = check_result[0]
payload_json = check_result[1]
if result is False:
print('signature does NOT verify')
log_message(payload_json)
return jsonify(result)
if result is True:
print('signature verifies')
create_session()
order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=
payload['receiver_pk'], buy_currency=payload['buy_currency'
], sell_currency=payload['sell_currency'], buy_amount=
payload['buy_amount'], sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
fill_order()
shutdown_session()
return jsonify(result)
@app.route('/order_book')
def order_book():
print('--------- order_book ---------')
create_session()
order_dict_list = [row2dict(order) for order in g.session.query(Order).
all()]
result = {'data': order_dict_list}
print('order book length: ')
print(len(order_dict_list))
shutdown_session()
return jsonify(result)
if __name__ == '__main__':
    app.run(port=5002)
<|reserved_special_token_1|>
from flask import Flask, request, g
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from flask import jsonify
import json
import eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
<|reserved_special_token_0|>
def check_sig(payload, sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == 'Algorand':
print('Algorand')
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print('Algo sig verifies!')
result = True
elif platform == 'Ethereum':
print('Ethereum')
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json
)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig
) == pk:
print('Eth sig verifies!')
result = True
return result, payload_json
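# Signing sketch for the Algorand branch above (hypothetical keys; the
# Ethereum branch is analogous via eth_account.Account.sign_message):
#   sk, pk = algosdk.account.generate_account()
#   payload = {'sender_pk': pk, 'platform': 'Algorand', ...}
#   sig = algosdk.util.sign_bytes(json.dumps(payload).encode('utf-8'), sk)
#   check_sig(payload, sig)  # -> (True, json.dumps(payload))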
def fill_order():
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
order_list = []
orders = g.session.query(Order).filter(Order.filled == None).all()
for existing_order in orders:
if (existing_order.buy_currency == current_order.sell_currency and
existing_order.sell_currency == current_order.buy_currency and
existing_order.sell_amount / existing_order.buy_amount >=
current_order.buy_amount / current_order.sell_amount and
existing_order.counterparty_id == None):
order_list.append(existing_order)
if len(order_list) > 0:
match_order = order_list[0]
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
if current_order.sell_amount < match_order.buy_amount:
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = (match_order.sell_amount / match_order.
buy_amount)
sell_amount_new_match = diff * exchange_rate_match
new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=
match_order.receiver_pk, buy_currency=match_order.
buy_currency, sell_currency=match_order.sell_currency,
buy_amount=diff, sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print('M')
fill_order()
if current_order.buy_amount > match_order.sell_amount:
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = (current_order.buy_amount /
current_order.sell_amount)
sell_amount_new_current = diff / exchange_rate_current
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk, buy_currency=
current_order.buy_currency, sell_currency=current_order.
sell_currency, buy_amount=diff, sell_amount=
sell_amount_new_current, creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print('C')
fill_order()
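# Note on fill_order(): each partial fill inserts a child order for the
# unfilled remainder and recurses, so matching continues until the newest
# order either finds no counterparty or both sides fill exactly.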
def log_message(d):
    create_session()
    # store a string: d may arrive as a dict or as an already-serialized JSON payload
    order_obj = Log(message=d if isinstance(d, str) else json.dumps(d))
    g.session.add(order_obj)
    shutdown_session()
def row2dict(row):
return {c.name: getattr(row, c.name) for c in row.__table__.columns}
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
<|reserved_special_token_0|>
@app.route('/trade', methods=['POST'])
def trade():
print('In trade endpoint')
if request.method == 'POST':
print('--------- trade ---------')
content = request.get_json(silent=True)
print(f'content = {json.dumps(content)}')
columns = ['sender_pk', 'receiver_pk', 'buy_currency',
'sell_currency', 'buy_amount', 'sell_amount', 'platform']
fields = ['sig', 'payload']
for field in fields:
if not field in content.keys():
print(f'{field} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
for column in columns:
if not column in content['payload'].keys():
print(f'{column} not received by Trade')
print(json.dumps(content))
log_message(content)
return jsonify(False)
sig = content['sig']
payload = content['payload']
platform = payload['platform']
platforms = ['Algorand', 'Ethereum']
if not platform in platforms:
print('input platform is not Algorand or Ethereum')
return jsonify(False)
check_result = check_sig(payload, sig)
result = check_result[0]
payload_json = check_result[1]
if result is False:
print('signature does NOT verify')
log_message(payload_json)
return jsonify(result)
if result is True:
print('signature verifies')
create_session()
order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=
payload['receiver_pk'], buy_currency=payload['buy_currency'
], sell_currency=payload['sell_currency'], buy_amount=
payload['buy_amount'], sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
fill_order()
shutdown_session()
return jsonify(result)
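# Example client call (a sketch; the URL/port and key material are assumptions):
#   import requests
#   r = requests.post('http://localhost:5002/trade',
#                     json={'payload': payload, 'sig': sig})
#   r.json()  # -> true once the signature verifies, false otherwise
# where payload carries sender_pk, receiver_pk, buy_currency, sell_currency,
# buy_amount, sell_amount and platform, signed as in check_sig above.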
@app.route('/order_book')
def order_book():
print('--------- order_book ---------')
create_session()
order_dict_list = [row2dict(order) for order in g.session.query(Order).
all()]
result = {'data': order_dict_list}
print('order book length: ')
print(len(order_dict_list))
shutdown_session()
return jsonify(result)
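# GET /order_book returns {"data": [...]}, one row2dict() rendering per Order
# row, e.g. (illustrative values only):
#   {"id": 1, "sender_pk": "...", "buy_currency": "ETH",
#    "sell_currency": "ALGO", "buy_amount": 5, "sell_amount": 10, "filled": null}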
if __name__ == '__main__':
    app.run(port=5002)
<|reserved_special_token_1|>
from flask import Flask, request, g
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from flask import jsonify
import json
import eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
# These decorators allow you to use g.session to access the database inside the request code
# g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals
@app.before_request
def create_session():
g.session = scoped_session(DBSession)
@app.teardown_appcontext
# def shutdown_session(response_or_exc):
def shutdown_session(exception=None):
sys.stdout.flush()
g.session.commit()
g.session.remove()
""" Suggested helper methods """
# check whether “sig” is a valid signature of json.dumps(payload),
# using the signature algorithm specified by the platform field.
# Be sure to verify the payload using the sender_pk.
def check_sig(payload,sig):
pk = payload['sender_pk']
platform = payload['platform']
payload_json = json.dumps(payload)
result = False
if platform == "Algorand":
print("Algorand")
if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):
print("Algo sig verifies!")
result = True
elif platform == "Ethereum":
print("Ethereum")
eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json)
if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:
print("Eth sig verifies!")
result = True
return result, payload_json
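# Ethereum-side sketch (hypothetical account) of producing a sig this accepts:
#   acct = eth_account.Account.create()
#   payload = {'sender_pk': acct.address, 'platform': 'Ethereum', ...}
#   msg = eth_account.messages.encode_defunct(text=json.dumps(payload))
#   sig = eth_account.Account.sign_message(msg, acct.key).signature.hex()
# Both sides must serialize the payload identically, since the signature
# covers the exact json.dumps string.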
# def fill_order(order,txes=[]):
# pass
# the inner recursive function
def fill_order():
# get the order you just inserted from the DB
current_order = g.session.query(Order).order_by(Order.id.desc()).first()
# print("_order_id")
# print(current_order.id)
# Check if there are any existing orders that match and add them into a list
order_list = []
    orders = g.session.query(Order).filter(Order.filled.is_(None)).all()
    for existing_order in orders:
        # skip zero-amount orders so the rate comparison below cannot divide by zero
        if existing_order.buy_amount == 0 or current_order.sell_amount == 0:
            continue
        if ((existing_order.buy_currency == current_order.sell_currency)
                and (existing_order.sell_currency == current_order.buy_currency)
                and (existing_order.sell_amount / existing_order.buy_amount
                     >= current_order.buy_amount / current_order.sell_amount)
                and (existing_order.counterparty_id is None)):
            order_list.append(existing_order)
# If a match is found between order and existing_order
if (len(order_list) > 0):
# print(" order_list_length")
# print(len(order_list))
# pick the first one in the list
match_order = order_list[0]
# Set the filled field to be the current timestamp on both orders
# Set counterparty_id to be the id of the other order
match_order.filled = datetime.now()
current_order.filled = datetime.now()
match_order.counterparty_id = current_order.id
current_order.counterparty_id = match_order.id
g.session.commit()
# if both orders can completely fill each other
# no child order needs to be generated
# If match_order is not completely filled
if (current_order.sell_amount < match_order.buy_amount):
# print("_match_order is not completely filled")
diff = match_order.buy_amount - current_order.sell_amount
exchange_rate_match = match_order.sell_amount / match_order.buy_amount
sell_amount_new_match = diff * exchange_rate_match
# print(match_order.id)
# print(diff)
# print(sell_amount_new_match)
new_order = Order(sender_pk=match_order.sender_pk,
receiver_pk=match_order.receiver_pk,
buy_currency=match_order.buy_currency,
sell_currency=match_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_match,
creator_id=match_order.id)
g.session.add(new_order)
g.session.commit()
print("M")
fill_order()
# If current_order is not completely filled
if (current_order.buy_amount > match_order.sell_amount):
# print("_current_order is not completely filled")
diff = current_order.buy_amount - match_order.sell_amount
exchange_rate_current = current_order.buy_amount / current_order.sell_amount
sell_amount_new_current = diff / exchange_rate_current
# print(current_order.id)
# print(diff)
# print(sell_amount_new_current)
new_order = Order(sender_pk=current_order.sender_pk,
receiver_pk=current_order.receiver_pk,
buy_currency=current_order.buy_currency,
sell_currency=current_order.sell_currency,
buy_amount=diff,
sell_amount=sell_amount_new_current,
creator_id=current_order.id)
g.session.add(new_order)
g.session.commit()
print("C")
fill_order()
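
# Worked example of the child-order arithmetic above (illustrative numbers):
# match_order buys 100 X for 200 Y; current_order sells 60 X for 30 Y.
# The rate check holds (200/100 = 2 >= 30/60 = 0.5) and 60 < 100, so a child
# of match_order is created that buys the remaining 40 X and sells
# 40 * (200/100) = 80 Y.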
# Takes input dictionary d and writes it to the Log table
# Hint: use json.dumps or str() to get it in a nice string form
def log_message(d):
    create_session()
    # the Log.message column expects a string, so serialize dict inputs
    # (per the json.dumps/str() hint above)
    order_obj = Log(message=d if isinstance(d, str) else json.dumps(d))
    g.session.add(order_obj)
    shutdown_session()
# convert a row in DB into a dict
def row2dict(row):
return {
c.name: getattr(row, c.name)
for c in row.__table__.columns
}
# print a dictionary nicely
def print_dict(d):
for key, value in d.items():
print(key, ' : ', value)
""" End of helper methods """
@app.route('/trade', methods=['POST'])
def trade():
print("In trade endpoint")
if request.method == "POST":
print("--------- trade ---------")
        content = request.get_json(silent=True)
        if content is None:
            print("no valid JSON body received by Trade")
            return jsonify(False)
        print( f"content = {json.dumps(content)}" )
        columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ]
        fields = [ "sig", "payload" ]
        # check whether the input contains both "sig" and "payload"
        for field in fields:
            if field not in content:
                print( f"{field} not received by Trade" )
                print( json.dumps(content) )
                log_message(content)
                return jsonify( False )
# check whether the input contains all 7 fields of payload
for column in columns:
            if column not in content['payload']:
print( f"{column} not received by Trade" )
print( json.dumps(content) )
log_message(content)
return jsonify( False )
#Your code here
#Note that you can access the database session using g.session
# TODO 1: Check the signature
# extract contents from json
sig = content['sig']
payload = content['payload']
platform = payload['platform']
# The platform must be either “Algorand” or "Ethereum".
platforms = ["Algorand", "Ethereum"]
        if platform not in platforms:
print("input platform is not Algorand or Ethereum")
return jsonify(False)
# check signature
        result, payload_json = check_sig(payload, sig)
# TODO 2: Add the order to the database
# TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful
# If the signature does not verify, do not insert the order into the “Order” table.
# Instead, insert a record into the “Log” table, with the message field set to be json.dumps(payload).
if result is False:
print("signature does NOT verify")
log_message(payload_json)
return jsonify(result)
# If the signature verifies, store the signature,
# as well as all of the fields under the ‘payload’ in the “Order” table EXCEPT for 'platform’.
if result is True:
print("signature verifies")
create_session()
order_obj = Order(sender_pk=payload['sender_pk'],
receiver_pk=payload['receiver_pk'],
buy_currency=payload['buy_currency'],
sell_currency=payload['sell_currency'],
buy_amount=payload['buy_amount'],
sell_amount=payload['sell_amount'],
signature=sig)
g.session.add(order_obj)
# TODO 3: Fill the order
fill_order()
shutdown_session()
return jsonify(result)
@app.route('/order_book')
def order_book():
#Your code here
#Note that you can access the database session using g.session
# The “/order_book” endpoint should return a list of all orders in the database.
# The response should contain a single key “data” that refers to a list of orders formatted as JSON.
# Each order should be a dict with (at least) the following fields
# ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", “signature”).
print("--------- order_book ---------")
create_session()
# get orders from DB into a list
order_dict_list = [
row2dict(order)
for order in g.session.query(Order).all()
]
# add the list into a dict
result = {
'data': order_dict_list
}
print("order book length: ")
print(len(order_dict_list))
# print_dict(order_dict_list[-2])
# print_dict(order_dict_list[-1])
shutdown_session()
return jsonify(result)
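
# Example requests against a local run (hypothetical values):
#   curl -X POST http://localhost:5002/trade \
#        -H 'Content-Type: application/json' \
#        -d '{"sig": "0x...", "payload": {"sender_pk": "...", "receiver_pk": "...",
#             "buy_currency": "Ethereum", "sell_currency": "Algorand",
#             "buy_amount": 1, "sell_amount": 2, "platform": "Ethereum"}}'
#   curl http://localhost:5002/order_book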
if __name__ == '__main__':
    app.run(port=5002)
"""
Write two functions:
1. `to_list()`, which converts a number to an integer list of its digits.
2. `to_number()`, which converts a list of integers back to its number.
### Examples
to_list(235) ➞ [2, 3, 5]
to_list(0) ➞ [0]
to_number([2, 3, 5]) ➞ 235
to_number([0]) ➞ 0
### Notes
All test cases will be weakly positive numbers: `>= 0`
"""
def to_list(num):
    return list(map(int, str(num)))


def to_number(lst):
    return int("".join(map(str, lst)))
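
# Quick sanity checks mirroring the docstring examples:
if __name__ == "__main__":
    assert to_list(235) == [2, 3, 5]
    assert to_list(0) == [0]
    assert to_number([2, 3, 5]) == 235
    assert to_number([0]) == 0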
class Account:
    """A class representing a bank account."""

    def __init__(self, name, amount):
        self.name = name
        self._balance = amount

    def __str__(self):
        return 'Account holder {}, balance {}'.format(self.name, self._balance)

    def _info(self):
        print('\t')
normal
|
{
"blob_id": "2dc4a4ae8e02e823073b1a9711dbd864a54bab43",
"index": 5072,
"step-1": "class Account:\n '''은행계좌를 표현하는 클래스'''\n \n\n def __init__(self,name,account):\n self.name = name\n self._balance = amount\n\n def __str__(self):\n return '예금주 {}, 잔고 {}'.format(slef.name, self._balance)\n\n def _info(self):\n print('\\t')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.contrib import admin
from .models import JobListing
from .models import Employer
admin.site.register(JobListing)
admin.site.register(Employer)
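# Registering without a ModelAdmin class exposes both models through Django's
# default admin interface.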
import reddit
import tts
import sys
import praw
import os
#TODO: CENSOR CURSE WORDS,tag images that have curse words in them. strip punctuation from comment replies mp3
#TODO: pay for ads :thinking: buy views?
#TODO: sort by top upvotes
#todo: remove the formatting stuff
#todo: redo ducking
#todo quick script to get high upvote replies
#todo: remove hyperlinks
POST_ID = sys.argv[1]
NUM_POSTS = int(sys.argv[2])
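# NOTE: hardcoded API credentials are kept from the original; in practice
# they would be loaded from environment variables or a praw.ini file.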
reddit_object = praw.Reddit(
client_id="aAhfCgWHCGOylw",
client_secret="FLrVvWquolZc4cnKaEhULqzfUYsxQQ",
user_agent='reddit_to_vid')
print(f"NOW PROCESSING POST ID: {POST_ID}")
comments_from_post, post_title = reddit.get_top_comments_from_id(reddit_object, POST_ID, NUM_POSTS)
tts.comment_to_mp3(post_title, './quota.txt', 'titles', 0, randomize=True)
n = 1
for comment in comments_from_post:
    tts.comment_to_mp3(comment, "./quota.txt", POST_ID, n, randomize=True)
    n += 1
tts.comment_to_mp3("Oh, you made it to the end? You're a ducking beast! Lets make a deal: Hit like and subscribe and I will provide more humanoid content. Goodbye!", "./quota.txt", 'duck', 1, randomize=True)
from covid import FuzzyNet
import numpy as np
import time
if __name__ == '__main__':
# mx1,mx2,mx3,my1,my2,my3, dx1,dx2,dx3,dy1,dy2,dy3, p1,p2,p3,p4,p5,p6,p7,p8,p9, q1,q2,q3,q4,q5,q6,q7,q8,q9, r1,r2,r3,r4,r5,r6,r7,r8,r9
generations = 100
for generation in range(generations):
population = np.random.randint(0, 255, size=(200, 39), dtype=np.uint8)
print('Population\n', population, end='\n\n')
cov19 = FuzzyNet()
print('Base Z matrix\n', cov19.Z, end='\n\n')
population_fa, population_matrix_z, best_FA_index = cov19.get_FA(population=population)
if population_fa[best_FA_index] < 40:
print('Best Z matrix\n', population_matrix_z[best_FA_index], population_fa[best_FA_index])
cov19.plot(population_matrix_z[best_FA_index])
time.sleep(5)
# xi , yj = zij
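
# Note: the population is re-randomized every generation, so this loop is a
# random search rather than an evolving genetic algorithm; FuzzyNet.get_FA is
# assumed to return (fitness per individual, decoded Z matrices, best index),
# matching the unpacking above.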
from django.db import connection
from .models import Order
from .models import Package
from .models import DeliveryStatus
from .models import CalcParameters
class DataService:
def __init__(self):
pass
@staticmethod
def get_all_orders():
orders = Order.objects.order_by('-order_date')
# create new variables for display
for o in orders:
o.package_names = ', '.join([p.name for p in list(o.packages.all())])
o.delivery_date = o.deliveryinfo_set.get().delivery_date
o.delivery_charge = o.deliveryinfo_set.get().charge
return orders
@staticmethod
def get_all_packages():
return Package.objects.all()
@staticmethod
def get_shopping_list_details(order_ids, dish_ids=None):
"""
:param order_ids: a list of order ids as int or str. Or a single order id as int or str
:param dish_ids: Restrict shopping list to these dishes.
A list of dish ids as int or str. Or a single order id as int or str.
:return: Return shopping list for the given orders
"""
if isinstance(order_ids, str):
order_ids = [int(order_ids)]
if isinstance(order_ids, int):
order_ids = [order_ids]
if not isinstance(order_ids, list):
raise Exception('Expecting a single order id or a list of order ids. Got [{ids}]'.format(ids=order_ids))
SQL = """select
d.id dish_id,
d.name dish_name,
sum(op.package_qty) dish_qty,
sum(d.portion_count) portion_count,
i.name ingredient_name,
round(sum(di.ingredient_weight * op.package_qty), 2) total_ingredient_weight,
round(sum(di.ingredient_weight * (i.cost_price/i.measure) * op.package_qty), 2) total_cost_price
from
orders o, order_package op, package_dish pd, dish d, dish_ingredient di, ingredient i
where
o.id = op.order_id and
op.package_id = pd.package_id and
pd.dish_id = d.id and
d.id = di.dish_id and
di.ingredient_id = i.id and
o.id in ({ids})
group by d.id, d.name, i.name
order by d.name, i.name""".format(ids=','.join([str(x) for x in order_ids]))
with connection.cursor() as cursor:
cursor.execute(SQL)
rows = cursor.fetchall()
# return a list of tuples rather than a tuple of tuples
return [row for row in rows]
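
# Example (hypothetical ids): DataService.get_shopping_list_details([3, 7])
# returns one row per (dish, ingredient) group across orders 3 and 7,
# matching the SELECT list in the query above.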
class StaticDataDao(type):
@property
def delivery_statuses(cls):
if getattr(cls, '_delivery_statuses', None) is None:
cls._delivery_statuses = list(DeliveryStatus.objects.all())
return cls._delivery_statuses
@property
def calc_parameters(cls):
if getattr(cls, '_calc_parameters', None) is None:
m = {}
for p in list(CalcParameters.objects.all()):
m[p.name] = p.value
cls._calc_parameters = m
return cls._calc_parameters
class StaticDataService(metaclass=StaticDataDao):
    # __metaclass__ assignments are ignored in Python 3, so the metaclass is
    # declared in the class statement to make StaticDataDao's cached
    # properties available on this class
    pass
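
# A minimal usage sketch (assumes a populated database):
#   StaticDataService.delivery_statuses   # cached list of DeliveryStatus rows
#   StaticDataService.calc_parameters     # cached {name: value} dict from CalcParameters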
|
normal
|
{
"blob_id": "2e66a31638eb4e619f14a29d5d3847482d207003",
"index": 3996,
"step-1": "<mask token>\n\n\nclass StaticDataDao(type):\n\n @property\n def delivery_statuses(cls):\n if getattr(cls, '_delivery_statuses', None) is None:\n cls._delivery_statuses = list(DeliveryStatus.objects.all())\n return cls._delivery_statuses\n\n @property\n def calc_parameters(cls):\n if getattr(cls, '_calc_parameters', None) is None:\n m = {}\n for p in list(CalcParameters.objects.all()):\n m[p.name] = p.value\n cls._calc_parameters = m\n return cls._calc_parameters\n\n\nclass StaticDataService(object):\n __metaclass__ = StaticDataDao\n",
"step-2": "<mask token>\n\n\nclass DataService:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass StaticDataDao(type):\n\n @property\n def delivery_statuses(cls):\n if getattr(cls, '_delivery_statuses', None) is None:\n cls._delivery_statuses = list(DeliveryStatus.objects.all())\n return cls._delivery_statuses\n\n @property\n def calc_parameters(cls):\n if getattr(cls, '_calc_parameters', None) is None:\n m = {}\n for p in list(CalcParameters.objects.all()):\n m[p.name] = p.value\n cls._calc_parameters = m\n return cls._calc_parameters\n\n\nclass StaticDataService(object):\n __metaclass__ = StaticDataDao\n",
"step-3": "<mask token>\n\n\nclass DataService:\n\n def __init__(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n\nclass StaticDataDao(type):\n\n @property\n def delivery_statuses(cls):\n if getattr(cls, '_delivery_statuses', None) is None:\n cls._delivery_statuses = list(DeliveryStatus.objects.all())\n return cls._delivery_statuses\n\n @property\n def calc_parameters(cls):\n if getattr(cls, '_calc_parameters', None) is None:\n m = {}\n for p in list(CalcParameters.objects.all()):\n m[p.name] = p.value\n cls._calc_parameters = m\n return cls._calc_parameters\n\n\nclass StaticDataService(object):\n __metaclass__ = StaticDataDao\n",
"step-4": "<mask token>\n\n\nclass DataService:\n\n def __init__(self):\n pass\n\n @staticmethod\n def get_all_orders():\n orders = Order.objects.order_by('-order_date')\n for o in orders:\n o.package_names = ', '.join([p.name for p in list(o.packages.\n all())])\n o.delivery_date = o.deliveryinfo_set.get().delivery_date\n o.delivery_charge = o.deliveryinfo_set.get().charge\n return orders\n <mask token>\n <mask token>\n\n\nclass StaticDataDao(type):\n\n @property\n def delivery_statuses(cls):\n if getattr(cls, '_delivery_statuses', None) is None:\n cls._delivery_statuses = list(DeliveryStatus.objects.all())\n return cls._delivery_statuses\n\n @property\n def calc_parameters(cls):\n if getattr(cls, '_calc_parameters', None) is None:\n m = {}\n for p in list(CalcParameters.objects.all()):\n m[p.name] = p.value\n cls._calc_parameters = m\n return cls._calc_parameters\n\n\nclass StaticDataService(object):\n __metaclass__ = StaticDataDao\n",
"step-5": "from django.db import connection\n\nfrom .models import Order\nfrom .models import Package\nfrom .models import DeliveryStatus\nfrom .models import CalcParameters\n\n\nclass DataService:\n def __init__(self):\n pass\n\n @staticmethod\n def get_all_orders():\n orders = Order.objects.order_by('-order_date')\n # create new variables for display\n for o in orders:\n o.package_names = ', '.join([p.name for p in list(o.packages.all())])\n o.delivery_date = o.deliveryinfo_set.get().delivery_date\n o.delivery_charge = o.deliveryinfo_set.get().charge\n return orders\n\n @staticmethod\n def get_all_packages():\n return Package.objects.all()\n\n @staticmethod\n def get_shopping_list_details(order_ids, dish_ids=None):\n \"\"\"\n :param order_ids: a list of order ids as int or str. Or a single order id as int or str\n :param dish_ids: Restrict shopping list to these dishes.\n A list of dish ids as int or str. Or a single order id as int or str.\n :return: Return shopping list for the given orders\n \"\"\"\n if isinstance(order_ids, str):\n order_ids = [int(order_ids)]\n if isinstance(order_ids, int):\n order_ids = [order_ids]\n if not isinstance(order_ids, list):\n raise Exception('Expecting a single order id or a list of order ids. Got [{ids}]'.format(ids=order_ids))\n\n SQL = \"\"\"select\n d.id dish_id,\n d.name dish_name,\n sum(op.package_qty) dish_qty,\n sum(d.portion_count) portion_count,\n i.name ingredient_name,\n round(sum(di.ingredient_weight * op.package_qty), 2) total_ingredient_weight,\n round(sum(di.ingredient_weight * (i.cost_price/i.measure) * op.package_qty), 2) total_cost_price\n from\n orders o, order_package op, package_dish pd, dish d, dish_ingredient di, ingredient i\n where\n o.id = op.order_id and\n op.package_id = pd.package_id and\n pd.dish_id = d.id and\n d.id = di.dish_id and\n di.ingredient_id = i.id and\n o.id in ({ids})\n group by d.id,\td.name, i.name\n order by d.name, i.name\"\"\".format(ids=','.join([str(x) for x in order_ids]))\n\n with connection.cursor() as cursor:\n cursor.execute(SQL)\n rows = cursor.fetchall()\n\n # return a list of tuples rather than a tuple of tuples\n return [row for row in rows]\n\n\nclass StaticDataDao(type):\n @property\n def delivery_statuses(cls):\n if getattr(cls, '_delivery_statuses', None) is None:\n cls._delivery_statuses = list(DeliveryStatus.objects.all())\n return cls._delivery_statuses\n\n @property\n def calc_parameters(cls):\n if getattr(cls, '_calc_parameters', None) is None:\n m = {}\n for p in list(CalcParameters.objects.all()):\n m[p.name] = p.value\n cls._calc_parameters = m\n return cls._calc_parameters\n\n\nclass StaticDataService(object):\n __metaclass__ = StaticDataDao\n",
"step-ids": [
5,
6,
7,
8,
12
]
}
|
[
5,
6,
7,
8,
12
] |
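The StaticDataDao metaclass in the record above is a lazy class-level cache: because the properties live on the metaclass, StaticDataService.delivery_statuses works on the class itself and hits the database only once. Note that `__metaclass__ = StaticDataDao` is Python 2 spelling; Python 3 passes the metaclass in the class statement. A minimal sketch of the same pattern, with a stand-in `load_rows` instead of a Django query:

def load_rows():
    return ['pending', 'shipped', 'delivered']  # stands in for DeliveryStatus.objects.all()

class CachedClassProps(type):
    @property
    def statuses(cls):
        if getattr(cls, '_statuses', None) is None:  # computed once, then cached on the class
            cls._statuses = load_rows()
        return cls._statuses

class StaticData(metaclass=CachedClassProps):  # Python 3 spelling
    pass

# StaticData.statuses triggers load_rows() on first access only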
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.namedWindow('st', cv2.WINDOW_NORMAL)
cv2.imshow('st', img)
cv2.imwrite('mes.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img = cv2.imread('d:\\st.jpg', 0)
cv2.namedWindow('st', cv2.WINDOW_NORMAL)
cv2.imshow('st', img)
cv2.imwrite('mes.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import tensorflow as tf
import cv2
img = cv2.imread('d:\\st.jpg', 0)
cv2.namedWindow('st', cv2.WINDOW_NORMAL)
cv2.imshow('st', img)
cv2.imwrite('mes.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import tensorflow as tf
import cv2
img = cv2.imread('d:\\st.jpg', 0)  # backslash escaped so the path survives
cv2.namedWindow('st', cv2.WINDOW_NORMAL)  # WINDOW_NORMAL makes the image window resizable
cv2.imshow('st',img)
cv2.imwrite('mes.png',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "6b5399effe73d27eade0381f016cd7819a6e104a",
"index": 2466,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.namedWindow('st', cv2.WINDOW_NORMAL)\ncv2.imshow('st', img)\ncv2.imwrite('mes.png', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg = cv2.imread('d:\\\\st.jpg', 0)\ncv2.namedWindow('st', cv2.WINDOW_NORMAL)\ncv2.imshow('st', img)\ncv2.imwrite('mes.png', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import tensorflow as tf\nimport cv2\nimg = cv2.imread('d:\\\\st.jpg', 0)\ncv2.namedWindow('st', cv2.WINDOW_NORMAL)\ncv2.imshow('st', img)\ncv2.imwrite('mes.png', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "import tensorflow as tf\r\nimport cv2 \r\nimg=cv2.imread('d:\\st.jpg',0)\r\ncv2.namedWindow('st',cv2.WINDOW_NORMAL)#可以调整图像窗口大小\r\ncv2.imshow('st',img)\r\ncv2.imwrite('mes.png',img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
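One sharp edge in the record above: cv2.imread does not raise on a missing or unreadable file, it silently returns None, and the script only fails later inside imshow. A defensive variant of the same snippet (path kept from the original):

import cv2

img = cv2.imread('d:\\st.jpg', cv2.IMREAD_GRAYSCALE)  # flag 0 == IMREAD_GRAYSCALE
if img is None:  # imread signals failure with None, not an exception
    raise FileNotFoundError('cv2.imread could not read d:\\st.jpg')
cv2.namedWindow('st', cv2.WINDOW_NORMAL)
cv2.imshow('st', img)
cv2.waitKey(0)
cv2.destroyAllWindows()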
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def RegularGramSchmidt():
while True:
vectors = utility.get_matrix_from_user(5)
if len(vectors) > 0:
calc = RGSC.RegularGramSchmidt()
result_matrix = calc.calc(vectors)
if result_matrix is not None:
print(result_matrix)
utility.print_if_matrix_is_basis(result_matrix)
answer = input('Start over? (Y/n)')
if answer.lower() == 'n':
break
else:
continue
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def RegularGramSchmidt():
while True:
vectors = utility.get_matrix_from_user(5)
if len(vectors) > 0:
calc = RGSC.RegularGramSchmidt()
result_matrix = calc.calc(vectors)
if result_matrix is not None:
print(result_matrix)
utility.print_if_matrix_is_basis(result_matrix)
answer = input('Start over? (Y/n)')
if answer.lower() == 'n':
break
else:
continue
def CustomGramSchmidt():
while True:
print('Enter the inner product matrix 3x3')
inner_product_matrix = utility.get_matrix_from_user(3, True)
calc = CGSC.CustomGramSchmidt(inner_product_matrix)
print('Enter vectors from R(3)')
vectors = utility.get_matrix_from_user(3)
if len(vectors) > 0:
result_matrix = calc.calc(vectors)
if result_matrix is not None:
print(result_matrix)
utility.print_if_matrix_is_basis(result_matrix)
answer = input('Start over? (Y/n)')
if answer.lower() == 'n':
break
else:
continue
<|reserved_special_token_1|>
from . import utility
from . import regular_gram_schmidt as RGSC
from . import custom_gram_schmidt as CGSC
def RegularGramSchmidt():
while True:
vectors = utility.get_matrix_from_user(5)
if len(vectors) > 0:
calc = RGSC.RegularGramSchmidt()
result_matrix = calc.calc(vectors)
if result_matrix is not None:
print(result_matrix)
utility.print_if_matrix_is_basis(result_matrix)
answer = input('Start over? (Y/n)')
if answer.lower() == 'n':
break
else:
continue
def CustomGramSchmidt():
while True:
print('Enter the inner product matrix 3x3')
inner_product_matrix = utility.get_matrix_from_user(3, True)
calc = CGSC.CustomGramSchmidt(inner_product_matrix)
print('Enter vectors from R(3)')
vectors = utility.get_matrix_from_user(3)
if len(vectors) > 0:
result_matrix = calc.calc(vectors)
if result_matrix is not None:
print(result_matrix)
utility.print_if_matrix_is_basis(result_matrix)
answer = input('Start over? (Y/n)')
if answer.lower() == 'n':
break
else:
continue
<|reserved_special_token_1|>
from . import utility
from . import regular_gram_schmidt as RGSC
from . import custom_gram_schmidt as CGSC
def RegularGramSchmidt():
while True:
vectors = utility.get_matrix_from_user(5)
if len(vectors) > 0:
calc = RGSC.RegularGramSchmidt()
result_matrix = calc.calc(vectors)
if result_matrix is not None:
print(result_matrix)
utility.print_if_matrix_is_basis(result_matrix)
answer = input("Start over? (Y/n)")
if answer.lower() == 'n':
break
else:
continue
def CustomGramSchmidt():
while True:
print("Enter the inner product matrix 3x3")
inner_product_matrix = utility.get_matrix_from_user(3, True)
calc = CGSC.CustomGramSchmidt(inner_product_matrix)
print("Enter vectors from R(3)")
vectors = utility.get_matrix_from_user(3)
if len(vectors) > 0:
result_matrix = calc.calc(vectors)
if result_matrix is not None:
print(result_matrix)
utility.print_if_matrix_is_basis(result_matrix)
answer = input("Start over? (Y/n)")
if answer.lower() == 'n':
break
else:
continue
|
flexible
|
{
"blob_id": "b6b3d94db62b47aac9bf78e8224a38ccff9335e3",
"index": 7591,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef RegularGramSchmidt():\n while True:\n vectors = utility.get_matrix_from_user(5)\n if len(vectors) > 0:\n calc = RGSC.RegularGramSchmidt()\n result_matrix = calc.calc(vectors)\n if result_matrix is not None:\n print(result_matrix)\n utility.print_if_matrix_is_basis(result_matrix)\n answer = input('Start over? (Y/n)')\n if answer.lower() == 'n':\n break\n else:\n continue\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef RegularGramSchmidt():\n while True:\n vectors = utility.get_matrix_from_user(5)\n if len(vectors) > 0:\n calc = RGSC.RegularGramSchmidt()\n result_matrix = calc.calc(vectors)\n if result_matrix is not None:\n print(result_matrix)\n utility.print_if_matrix_is_basis(result_matrix)\n answer = input('Start over? (Y/n)')\n if answer.lower() == 'n':\n break\n else:\n continue\n\n\ndef CustomGramSchmidt():\n while True:\n print('Enter the inner product matrix 3x3')\n inner_product_matrix = utility.get_matrix_from_user(3, True)\n calc = CGSC.CustomGramSchmidt(inner_product_matrix)\n print('Enter vectors from R(3)')\n vectors = utility.get_matrix_from_user(3)\n if len(vectors) > 0:\n result_matrix = calc.calc(vectors)\n if result_matrix is not None:\n print(result_matrix)\n utility.print_if_matrix_is_basis(result_matrix)\n answer = input('Start over? (Y/n)')\n if answer.lower() == 'n':\n break\n else:\n continue\n",
"step-4": "from . import utility\nfrom . import regular_gram_schmidt as RGSC\nfrom . import custom_gram_schmidt as CGSC\n\n\ndef RegularGramSchmidt():\n while True:\n vectors = utility.get_matrix_from_user(5)\n if len(vectors) > 0:\n calc = RGSC.RegularGramSchmidt()\n result_matrix = calc.calc(vectors)\n if result_matrix is not None:\n print(result_matrix)\n utility.print_if_matrix_is_basis(result_matrix)\n answer = input('Start over? (Y/n)')\n if answer.lower() == 'n':\n break\n else:\n continue\n\n\ndef CustomGramSchmidt():\n while True:\n print('Enter the inner product matrix 3x3')\n inner_product_matrix = utility.get_matrix_from_user(3, True)\n calc = CGSC.CustomGramSchmidt(inner_product_matrix)\n print('Enter vectors from R(3)')\n vectors = utility.get_matrix_from_user(3)\n if len(vectors) > 0:\n result_matrix = calc.calc(vectors)\n if result_matrix is not None:\n print(result_matrix)\n utility.print_if_matrix_is_basis(result_matrix)\n answer = input('Start over? (Y/n)')\n if answer.lower() == 'n':\n break\n else:\n continue\n",
"step-5": "from . import utility\nfrom . import regular_gram_schmidt as RGSC\nfrom . import custom_gram_schmidt as CGSC\n\n\ndef RegularGramSchmidt():\n while True:\n vectors = utility.get_matrix_from_user(5)\n if len(vectors) > 0:\n calc = RGSC.RegularGramSchmidt()\n result_matrix = calc.calc(vectors)\n if result_matrix is not None:\n print(result_matrix)\n utility.print_if_matrix_is_basis(result_matrix)\n answer = input(\"Start over? (Y/n)\")\n if answer.lower() == 'n':\n break\n else:\n continue\n\n\ndef CustomGramSchmidt():\n while True:\n print(\"Enter the inner product matrix 3x3\")\n inner_product_matrix = utility.get_matrix_from_user(3, True)\n calc = CGSC.CustomGramSchmidt(inner_product_matrix)\n print(\"Enter vectors from R(3)\")\n vectors = utility.get_matrix_from_user(3)\n if len(vectors) > 0:\n result_matrix = calc.calc(vectors)\n if result_matrix is not None:\n print(result_matrix)\n utility.print_if_matrix_is_basis(result_matrix)\n answer = input(\"Start over? (Y/n)\")\n if answer.lower() == 'n':\n break\n else:\n continue\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
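The RGSC and CGSC modules are not included in this record, but both code paths reduce to the same projection-and-normalize loop; the custom variant simply replaces the Euclidean dot product with <u, v> = u·G·v for a user-supplied 3x3 Gram matrix G. A plausible numpy sketch of what their calc methods compute (the function name and tolerance are assumptions):

import numpy as np

def gram_schmidt(vectors, gram=None):
    """Orthonormalize `vectors` under <u, v> = u @ gram @ v (identity if omitted)."""
    vectors = [np.asarray(v, dtype=float) for v in vectors]
    if gram is None:
        gram = np.eye(len(vectors[0]))
    basis = []
    for v in vectors:
        w = v - sum((u @ gram @ v) * u for u in basis)  # remove projections onto the basis so far
        norm = np.sqrt(w @ gram @ w)
        if norm > 1e-12:  # drop linearly dependent inputs
            basis.append(w / norm)
    return np.array(basis)

# gram_schmidt([[1, 1, 0], [1, 0, 1], [0, 1, 1]]) returns an orthonormal 3x3 basis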
import retro # pip install gym-retro
import numpy as np # pip install numpy
#import cv2 # pip install opencv-python
import neat # pip install neat-python
import pickle # pip install cloudpickle
import os
import multiprocessing
import cv2
import time
import visualize  # plotting helper from the neat-python examples; run() uses it but never imported it
env = retro.make(game='Pong-Atari2600')
def eval_genome(genome, config):
net = neat.nn.FeedForwardNetwork.create(genome, config)
env.reset()
ob, _, _, _ = env.step(env.action_space.sample())
inx = int(ob.shape[0]/8)
iny = int(ob.shape[1]/8)
fitnesses = []
score1=0
score2=0
# Run the given simulation for up to num_steps time steps.
fitness = 0.0
done = False
start_time=time.time()
series_of_keys=[]
series_of_nnOut=[]
while not done:
env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
nnOut = net.activate(imgarray)
for o in nnOut:
if o > 0.:
keys = [1, 0]
else:
keys = [0, 1]
actions=[0]*4+keys+[0]*2
series_of_keys.append(keys)
series_of_nnOut.append(nnOut)
ob, rew, done, info = env.step(actions)
score1=info['score1']
score2=info['score2']
if score1 >19 or score2 >19:
done = True
print(series_of_keys)
# print(series_of_actions)
run_time=time.time()-start_time
    fitness = (score2 - score1) / (run_time - 2)  # parentheses added; the original divided only score1 by the elapsed time
return fitness
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
# Load the config file, which is assumed to live in
# the same directory as this script.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'pong_config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
pe = neat.ParallelEvaluator(10, eval_genome)
winner = pop.run(pe.evaluate)
# Save the winner.
with open('winner-feedforward', 'wb') as f:
pickle.dump(winner, f)
print(winner)
visualize.plot_stats(stats, ylog=True, view=True, filename="feedforward-fitness.svg")
visualize.plot_species(stats, view=True, filename="feedforward-speciation.svg")
node_names = {-1: 'x', -2: 'dx', -3: 'theta', -4: 'dtheta', 0: 'control'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename="winner-feedforward.gv")
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename="winner-feedforward-enabled.gv", show_disabled=False)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename="winner-feedforward-enabled-pruned.gv", show_disabled=False, prune_unused=True)
if __name__ == '__main__':
run()
|
normal
|
{
"blob_id": "36e350e0d578e169efaafb9e311566d71d6bc59e",
"index": 1438,
"step-1": "<mask token>\n\n\ndef eval_genome(genome, config):\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n env.reset()\n ob, _, _, _ = env.step(env.action_space.sample())\n inx = int(ob.shape[0] / 8)\n iny = int(ob.shape[1] / 8)\n fitnesses = []\n score1 = 0\n score2 = 0\n fitness = 0.0\n done = False\n start_time = time.time()\n series_of_keys = []\n series_of_nnOut = []\n while not done:\n env.render()\n ob = cv2.resize(ob, (inx, iny))\n ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)\n ob = np.reshape(ob, (inx, iny))\n imgarray = np.ndarray.flatten(ob)\n imgarray = np.interp(imgarray, (0, 254), (-1, +1))\n nnOut = net.activate(imgarray)\n for o in nnOut:\n if o > 0.0:\n keys = [1, 0]\n else:\n keys = [0, 1]\n actions = [0] * 4 + keys + [0] * 2\n series_of_keys.append(keys)\n series_of_nnOut.append(nnOut)\n ob, rew, done, info = env.step(actions)\n score1 = info['score1']\n score2 = info['score2']\n if score1 > 19 or score2 > 19:\n done = True\n print(series_of_keys)\n run_time = time.time() - start_time\n fitness = score2 - score1 / (run_time - 2)\n return fitness\n\n\ndef eval_genomes(genomes, config):\n for genome_id, genome in genomes:\n genome.fitness = eval_genome(genome, config)\n\n\ndef run():\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'pong_config')\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat\n .DefaultSpeciesSet, neat.DefaultStagnation, config_path)\n pop = neat.Population(config)\n stats = neat.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.StdOutReporter(True))\n pe = neat.ParallelEvaluator(10, eval_genome)\n winner = pop.run(pe.evaluate)\n with open('winner-feedforward', 'wb') as f:\n pickle.dump(winner, f)\n print(winner)\n visualize.plot_stats(stats, ylog=True, view=True, filename=\n 'feedforward-fitness.svg')\n visualize.plot_species(stats, view=True, filename=\n 'feedforward-speciation.svg')\n node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):\n 'control'}\n visualize.draw_net(config, winner, True, node_names=node_names)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward.gv')\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled.gv', show_disabled=False)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled-pruned.gv', show_disabled=\n False, prune_unused=True)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef eval_genome(genome, config):\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n env.reset()\n ob, _, _, _ = env.step(env.action_space.sample())\n inx = int(ob.shape[0] / 8)\n iny = int(ob.shape[1] / 8)\n fitnesses = []\n score1 = 0\n score2 = 0\n fitness = 0.0\n done = False\n start_time = time.time()\n series_of_keys = []\n series_of_nnOut = []\n while not done:\n env.render()\n ob = cv2.resize(ob, (inx, iny))\n ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)\n ob = np.reshape(ob, (inx, iny))\n imgarray = np.ndarray.flatten(ob)\n imgarray = np.interp(imgarray, (0, 254), (-1, +1))\n nnOut = net.activate(imgarray)\n for o in nnOut:\n if o > 0.0:\n keys = [1, 0]\n else:\n keys = [0, 1]\n actions = [0] * 4 + keys + [0] * 2\n series_of_keys.append(keys)\n series_of_nnOut.append(nnOut)\n ob, rew, done, info = env.step(actions)\n score1 = info['score1']\n score2 = info['score2']\n if score1 > 19 or score2 > 19:\n done = True\n print(series_of_keys)\n run_time = time.time() - start_time\n fitness = score2 - score1 / (run_time - 2)\n return fitness\n\n\ndef eval_genomes(genomes, config):\n for genome_id, genome in genomes:\n genome.fitness = eval_genome(genome, config)\n\n\ndef run():\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'pong_config')\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat\n .DefaultSpeciesSet, neat.DefaultStagnation, config_path)\n pop = neat.Population(config)\n stats = neat.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.StdOutReporter(True))\n pe = neat.ParallelEvaluator(10, eval_genome)\n winner = pop.run(pe.evaluate)\n with open('winner-feedforward', 'wb') as f:\n pickle.dump(winner, f)\n print(winner)\n visualize.plot_stats(stats, ylog=True, view=True, filename=\n 'feedforward-fitness.svg')\n visualize.plot_species(stats, view=True, filename=\n 'feedforward-speciation.svg')\n node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):\n 'control'}\n visualize.draw_net(config, winner, True, node_names=node_names)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward.gv')\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled.gv', show_disabled=False)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled-pruned.gv', show_disabled=\n False, prune_unused=True)\n\n\nif __name__ == '__main__':\n run()\n",
"step-3": "<mask token>\nenv = retro.make(game='Pong-Atari2600')\n\n\ndef eval_genome(genome, config):\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n env.reset()\n ob, _, _, _ = env.step(env.action_space.sample())\n inx = int(ob.shape[0] / 8)\n iny = int(ob.shape[1] / 8)\n fitnesses = []\n score1 = 0\n score2 = 0\n fitness = 0.0\n done = False\n start_time = time.time()\n series_of_keys = []\n series_of_nnOut = []\n while not done:\n env.render()\n ob = cv2.resize(ob, (inx, iny))\n ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)\n ob = np.reshape(ob, (inx, iny))\n imgarray = np.ndarray.flatten(ob)\n imgarray = np.interp(imgarray, (0, 254), (-1, +1))\n nnOut = net.activate(imgarray)\n for o in nnOut:\n if o > 0.0:\n keys = [1, 0]\n else:\n keys = [0, 1]\n actions = [0] * 4 + keys + [0] * 2\n series_of_keys.append(keys)\n series_of_nnOut.append(nnOut)\n ob, rew, done, info = env.step(actions)\n score1 = info['score1']\n score2 = info['score2']\n if score1 > 19 or score2 > 19:\n done = True\n print(series_of_keys)\n run_time = time.time() - start_time\n fitness = score2 - score1 / (run_time - 2)\n return fitness\n\n\ndef eval_genomes(genomes, config):\n for genome_id, genome in genomes:\n genome.fitness = eval_genome(genome, config)\n\n\ndef run():\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'pong_config')\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat\n .DefaultSpeciesSet, neat.DefaultStagnation, config_path)\n pop = neat.Population(config)\n stats = neat.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.StdOutReporter(True))\n pe = neat.ParallelEvaluator(10, eval_genome)\n winner = pop.run(pe.evaluate)\n with open('winner-feedforward', 'wb') as f:\n pickle.dump(winner, f)\n print(winner)\n visualize.plot_stats(stats, ylog=True, view=True, filename=\n 'feedforward-fitness.svg')\n visualize.plot_species(stats, view=True, filename=\n 'feedforward-speciation.svg')\n node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):\n 'control'}\n visualize.draw_net(config, winner, True, node_names=node_names)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward.gv')\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled.gv', show_disabled=False)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled-pruned.gv', show_disabled=\n False, prune_unused=True)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "import retro\nimport numpy as np\nimport neat\nimport pickle\nimport os\nimport multiprocessing\nimport cv2\nimport time\nenv = retro.make(game='Pong-Atari2600')\n\n\ndef eval_genome(genome, config):\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n env.reset()\n ob, _, _, _ = env.step(env.action_space.sample())\n inx = int(ob.shape[0] / 8)\n iny = int(ob.shape[1] / 8)\n fitnesses = []\n score1 = 0\n score2 = 0\n fitness = 0.0\n done = False\n start_time = time.time()\n series_of_keys = []\n series_of_nnOut = []\n while not done:\n env.render()\n ob = cv2.resize(ob, (inx, iny))\n ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)\n ob = np.reshape(ob, (inx, iny))\n imgarray = np.ndarray.flatten(ob)\n imgarray = np.interp(imgarray, (0, 254), (-1, +1))\n nnOut = net.activate(imgarray)\n for o in nnOut:\n if o > 0.0:\n keys = [1, 0]\n else:\n keys = [0, 1]\n actions = [0] * 4 + keys + [0] * 2\n series_of_keys.append(keys)\n series_of_nnOut.append(nnOut)\n ob, rew, done, info = env.step(actions)\n score1 = info['score1']\n score2 = info['score2']\n if score1 > 19 or score2 > 19:\n done = True\n print(series_of_keys)\n run_time = time.time() - start_time\n fitness = score2 - score1 / (run_time - 2)\n return fitness\n\n\ndef eval_genomes(genomes, config):\n for genome_id, genome in genomes:\n genome.fitness = eval_genome(genome, config)\n\n\ndef run():\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'pong_config')\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat\n .DefaultSpeciesSet, neat.DefaultStagnation, config_path)\n pop = neat.Population(config)\n stats = neat.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(neat.StdOutReporter(True))\n pe = neat.ParallelEvaluator(10, eval_genome)\n winner = pop.run(pe.evaluate)\n with open('winner-feedforward', 'wb') as f:\n pickle.dump(winner, f)\n print(winner)\n visualize.plot_stats(stats, ylog=True, view=True, filename=\n 'feedforward-fitness.svg')\n visualize.plot_species(stats, view=True, filename=\n 'feedforward-speciation.svg')\n node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):\n 'control'}\n visualize.draw_net(config, winner, True, node_names=node_names)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward.gv')\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled.gv', show_disabled=False)\n visualize.draw_net(config, winner, view=True, node_names=node_names,\n filename='winner-feedforward-enabled-pruned.gv', show_disabled=\n False, prune_unused=True)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "import retro # pip install gym-retro\r\nimport numpy as np # pip install numpy\r\n#import cv2 # pip install opencv-python\r\nimport neat # pip install neat-python\r\nimport pickle # pip install cloudpickle\r\nimport os\r\nimport multiprocessing\r\nimport cv2\r\nimport time\r\n\r\nenv = retro.make(game='Pong-Atari2600')\r\n\r\n\r\n\r\ndef eval_genome(genome, config):\r\n net = neat.nn.FeedForwardNetwork.create(genome, config)\r\n\r\n\r\n env.reset()\r\n ob, _, _, _ = env.step(env.action_space.sample())\r\n inx = int(ob.shape[0]/8)\r\n iny = int(ob.shape[1]/8)\r\n fitnesses = []\r\n\r\n\r\n score1=0\r\n score2=0\r\n # Run the given simulation for up to num_steps time steps.\r\n fitness = 0.0\r\n done = False\r\n start_time=time.time()\r\n series_of_keys=[]\r\n series_of_nnOut=[]\r\n while not done:\r\n env.render()\r\n\r\n\r\n ob = cv2.resize(ob, (inx, iny))\r\n ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)\r\n ob = np.reshape(ob, (inx, iny))\r\n imgarray = np.ndarray.flatten(ob)\r\n imgarray = np.interp(imgarray, (0, 254), (-1, +1))\r\n nnOut = net.activate(imgarray)\r\n\r\n\r\n for o in nnOut:\r\n if o > 0.:\r\n keys = [1, 0]\r\n else:\r\n keys = [0, 1]\r\n actions=[0]*4+keys+[0]*2\r\n\r\n series_of_keys.append(keys)\r\n series_of_nnOut.append(nnOut)\r\n\r\n ob, rew, done, info = env.step(actions)\r\n\r\n score1=info['score1']\r\n score2=info['score2']\r\n\r\n\r\n if score1 >19 or score2 >19:\r\n done = True\r\n\r\n print(series_of_keys)\r\n# print(series_of_actions)\r\n run_time=time.time()-start_time\r\n\r\n fitness=score2-score1/(run_time-2)\r\n return fitness\r\n\r\ndef eval_genomes(genomes, config):\r\n for genome_id, genome in genomes:\r\n genome.fitness = eval_genome(genome, config)\r\n\r\n\r\ndef run():\r\n # Load the config file, which is assumed to live in\r\n # the same directory as this script.\r\n local_dir = os.path.dirname(__file__)\r\n config_path = os.path.join(local_dir, 'pong_config')\r\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\r\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\r\n config_path)\r\n\r\n pop = neat.Population(config)\r\n stats = neat.StatisticsReporter()\r\n pop.add_reporter(stats)\r\n pop.add_reporter(neat.StdOutReporter(True))\r\n\r\n pe = neat.ParallelEvaluator(10, eval_genome)\r\n winner = pop.run(pe.evaluate)\r\n\r\n # Save the winner.\r\n with open('winner-feedforward', 'wb') as f:\r\n pickle.dump(winner, f)\r\n\r\n print(winner)\r\n\r\n visualize.plot_stats(stats, ylog=True, view=True, filename=\"feedforward-fitness.svg\")\r\n visualize.plot_species(stats, view=True, filename=\"feedforward-speciation.svg\")\r\n\r\n node_names = {-1: 'x', -2: 'dx', -3: 'theta', -4: 'dtheta', 0: 'control'}\r\n visualize.draw_net(config, winner, True, node_names=node_names)\r\n\r\n visualize.draw_net(config, winner, view=True, node_names=node_names,\r\n filename=\"winner-feedforward.gv\")\r\n visualize.draw_net(config, winner, view=True, node_names=node_names,\r\n filename=\"winner-feedforward-enabled.gv\", show_disabled=False)\r\n visualize.draw_net(config, winner, view=True, node_names=node_names,\r\n filename=\"winner-feedforward-enabled-pruned.gv\", show_disabled=False, prune_unused=True)\r\n\r\n\r\nif __name__ == '__main__':\r\n run()\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
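The densest part of eval_genome in the record above is the frame preprocessing, so it is worth isolating: each screen is downscaled by 8x, converted to grayscale, flattened, and rescaled from pixel values to [-1, 1] before it reaches the network. A sketch that mirrors those exact steps:

import cv2
import numpy as np

def preprocess(ob, scale=8):
    """Turn a raw Atari frame into the flat [-1, 1] vector the net activates on."""
    inx, iny = ob.shape[0] // scale, ob.shape[1] // scale
    ob = cv2.resize(ob, (inx, iny))  # same (inx, iny) argument order as the original
    ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
    return np.interp(ob.flatten(), (0, 254), (-1.0, 1.0))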
def main():
    a = 0  # Python does not default names to 0; without this line `print a` raises UnboundLocalError
    print a

    a = 7
    a *= 6
    print a
|
normal
|
{
"blob_id": "0527dc2b6fa0fe703b604c6e28fba44fe6def83b",
"index": 1862,
"step-1": "def main():\n # defaults to 0\n print a\n\n a = 7\n a *= 6\n print a\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
DATABASE_NAME = "user_db"
|
normal
|
{
"blob_id": "8c8bbbc682889c8d79c893f27def76ad70e8bf8d",
"index": 233,
"step-1": "<mask token>\n",
"step-2": "DATABASE_NAME = 'user_db'\n",
"step-3": "DATABASE_NAME = \"user_db\"",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
"""Shows how to call C code from python"""
__appname__ = "myccalc.py"
__author__ = "Joseph Palmer <[email protected]>"
__version__ = "0.0.1"
__license__ = "License for this code/"
__date__ = "Dec-2018"
## imports ##
import os
import ctypes
# Load the C library into python - needs the full path for some reason!
so_filepath = "{}/libmycalc.so".format(os.getcwd())
ctypes.cdll.LoadLibrary(so_filepath)
myccalc = ctypes.CDLL(so_filepath)
# make a simpler name for the mycalc.add_floats
add_floats = myccalc.add_floats
# tell python what variables this function takes & returns
add_floats.argtypes = [ctypes.c_float, ctypes.c_float]
add_floats.restype = ctypes.c_float
# the function can now be used
x = 1.2
y = 3.3
a = add_floats(x, y)
print("The sum of %.1f and %.1f is %.1f" % (x, y, a))
# we can do the same for others
sf = myccalc.subtract_floats
sf.argtypes = [ctypes.c_float, ctypes.c_float]
sf.restype = ctypes.c_float
b = sf(y, x)
print("Subtracting %.1f from %.1f is %.1f" % (x, y, b))
|
normal
|
{
"blob_id": "12ecfd2750f79fd19355665b6e57c2103a3cac3e",
"index": 4257,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nctypes.cdll.LoadLibrary(so_filepath)\n<mask token>\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\n<mask token>\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-3": "<mask token>\n__appname__ = 'myccalc.py'\n__author__ = 'Joseph Palmer <[email protected]>'\n__version__ = '0.0.1'\n__license__ = 'License for this code/'\n__date__ = 'Dec-2018'\n<mask token>\nso_filepath = '{}/libmycalc.so'.format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\nadd_floats = myccalc.add_floats\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-4": "<mask token>\n__appname__ = 'myccalc.py'\n__author__ = 'Joseph Palmer <[email protected]>'\n__version__ = '0.0.1'\n__license__ = 'License for this code/'\n__date__ = 'Dec-2018'\nimport os\nimport ctypes\nso_filepath = '{}/libmycalc.so'.format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\nadd_floats = myccalc.add_floats\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"Shows how to call C code from python\"\"\"\n__appname__ = \"myccalc.py\"\n__author__ = \"Joseph Palmer <[email protected]>\"\n__version__ = \"0.0.1\"\n__license__ = \"License for this code/\"\n__date__ = \"Dec-2018\"\n\n## imports ##\nimport os\nimport ctypes\n\n# Load the C library into python - needs the full path for some reason!\nso_filepath = \"{}/libmycalc.so\".format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\n\n# make a simpler name for the mycalc.add_floats\nadd_floats = myccalc.add_floats\n\n# tell python what variables this function takes & returns\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\n\n# the function can now be used\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint(\"The sum of %.1f and %.1f is %.1f\" % (x, y, a))\n\n# we can do the same for others\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint(\"Subtracting %.1f from %.1f is %.1f\" % (x, y, b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
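The argtypes/restype declarations in the record above are repeated verbatim for every exported function; since only the name and signature vary, a small helper keeps the bindings in one place. A sketch against the same libmycalc.so:

import ctypes
import os

def bind(lib, name, argtypes, restype):
    """Fetch `name` from a loaded shared library and declare its C signature."""
    fn = getattr(lib, name)
    fn.argtypes = argtypes
    fn.restype = restype
    return fn

myccalc = ctypes.CDLL(os.path.join(os.getcwd(), 'libmycalc.so'))
float2 = [ctypes.c_float, ctypes.c_float]
add_floats = bind(myccalc, 'add_floats', float2, ctypes.c_float)
subtract_floats = bind(myccalc, 'subtract_floats', float2, ctypes.c_float)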
import os, sys
import json
import paramiko
"""
Copies the credentials.json file locally from robot
"""
def copy_credentials_file(hostname, username, password, src_path, dst_path):
# create ssh connection
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=hostname, username=username, password=password)
# ftp file from robot to local path
ftp_client = ssh_client.open_sftp()
ftp_client.get(src_path, dst_path)
ftp_client.close()
"""
Creates a default config file for AWS
(aws_config.json)
"""
def create_default_config(path):
data = {}
data['method'] = 'GET'
data['service'] = 'ec2'
data['host'] = 'ec2.amazonaws.com'
data['region'] = 'us-east-1'
data['endpoint'] = 'https://ec2.amazonaws.com'
with open(path, 'w+') as file:
json.dump(data, file)
"""
Checks for the aws_config.json file,
creates the file and populates with default values
if not found.
"""
def check_aws_config():
config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')
if not os.path.exists(config_path):
print("\nCreating default AWS config...")
create_default_config(config_path)
print("Done.\n")
return config_path
"""
Checks for the credentials.json file,
creates the file and populates with values from
robot if not found.
"""
def check_credentials():
login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')
login_data = load_json(login_file)
robot_name = login_data['robot_name']
username = login_data['username']
password = login_data['password']
src_path = '/var/jibo/credentials.json'
dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')
if not os.path.exists(dst_path):
print("\nGrabbing AWS credentials from robot...")
copy_credentials_file(robot_name, username, password, src_path, dst_path)
print("Done.\n")
return dst_path
"""
Reads and returns contents of JSON file
"""
def load_json(path):
with open(path, 'r') as file:
data = json.load(file)
return data
|
normal
|
{
"blob_id": "27f162f2e350fdb284740bd67f4293535f0ab593",
"index": 8451,
"step-1": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n\n\ndef load_json(path):\n with open(path, 'r') as file:\n data = json.load(file)\n return data\n",
"step-4": "import os, sys\nimport json\nimport paramiko\n<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n\n\ndef load_json(path):\n with open(path, 'r') as file:\n data = json.load(file)\n return data\n",
"step-5": "import os, sys\nimport json\nimport paramiko\n\n\"\"\"\n\tCopies the credentials.json file locally from robot\n\"\"\"\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n\t# create ssh connection\n\tssh_client = paramiko.SSHClient()\n\tssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\tssh_client.connect(hostname=hostname, username=username, password=password)\n\n\t# ftp file from robot to local path\n\tftp_client = ssh_client.open_sftp()\n\tftp_client.get(src_path, dst_path)\n\tftp_client.close()\n\n\n\"\"\"\n\tCreates a default config file for AWS\n\t(aws_config.json)\n\"\"\"\ndef create_default_config(path):\n\tdata = {}\n\tdata['method'] = 'GET'\n\tdata['service'] = 'ec2'\n\tdata['host'] = 'ec2.amazonaws.com'\n\tdata['region'] = 'us-east-1'\n\tdata['endpoint'] = 'https://ec2.amazonaws.com'\n\n\twith open(path, 'w+') as file:\n\t\tjson.dump(data, file)\n\n\n\"\"\"\n\tChecks for the aws_config.json file,\n\tcreates the file and populates with default values\n\tif not found.\n\"\"\"\ndef check_aws_config():\n\tconfig_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n\n\tif not os.path.exists(config_path):\n\t\tprint(\"\\nCreating default AWS config...\")\n\t\tcreate_default_config(config_path)\n\t\tprint(\"Done.\\n\")\n\n\treturn config_path\n\n\n\"\"\"\n\tChecks for the credentials.json file,\n\tcreates the file and populates with values from\n\trobot if not found.\n\"\"\"\ndef check_credentials():\n\tlogin_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n\tlogin_data = load_json(login_file)\n\n\trobot_name = login_data['robot_name']\n\tusername = login_data['username']\n\tpassword = login_data['password']\n\t\n\tsrc_path = '/var/jibo/credentials.json'\n\tdst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n\n\tif not os.path.exists(dst_path):\n\t\tprint(\"\\nGrabbing AWS credentials from robot...\")\n\t\tcopy_credentials_file(robot_name, username, password, src_path, dst_path)\n\t\tprint(\"Done.\\n\")\n\n\treturn dst_path\n\n\n\"\"\"\n\tReads and returns contents of JSON file\n\"\"\"\ndef load_json(path):\n\twith open(path, 'r') as file:\n\t\tdata = json.load(file)\n\treturn data\n\n\t",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
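For completeness, a hypothetical entry point tying the helpers in the record above together: ensure both config files exist (creating defaults or fetching credentials over SFTP as needed), then load them.

if __name__ == '__main__':
    aws_config = load_json(check_aws_config())    # written with defaults if absent
    credentials = load_json(check_credentials())  # copied from the robot if absent
    print(aws_config['endpoint'])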
<|reserved_special_token_0|>
<|reserved_special_token_1|>
TRAIN_INPUT_PATH = '~/Projects/competitions/titanic/data/train.csv'
TEST_INPUT_PATH = '~/Projects/competitions/titanic/data/test.csv'
OUTPUT_PATH = 'output/'
TRAIN_VAL_SPLIT = 0.75
RANDOM_SEED = 42
MODEL = 'LOGISTIC_REGRESSION'
LOG_PATH = 'logs/'
<|reserved_special_token_1|>
TRAIN_INPUT_PATH = "~/Projects/competitions/titanic/data/train.csv"
TEST_INPUT_PATH = "~/Projects/competitions/titanic/data/test.csv"
OUTPUT_PATH = "output/"
TRAIN_VAL_SPLIT = 0.75
RANDOM_SEED = 42
MODEL = "LOGISTIC_REGRESSION"
LOG_PATH = "logs/"
|
flexible
|
{
"blob_id": "f1f708f00e05941c9a18a24b9a7556558583c3c7",
"index": 738,
"step-1": "<mask token>\n",
"step-2": "TRAIN_INPUT_PATH = '~/Projects/competitions/titanic/data/train.csv'\nTEST_INPUT_PATH = '~/Projects/competitions/titanic/data/test.csv'\nOUTPUT_PATH = 'output/'\nTRAIN_VAL_SPLIT = 0.75\nRANDOM_SEED = 42\nMODEL = 'LOGISTIC_REGRESSION'\nLOG_PATH = 'logs/'\n",
"step-3": "TRAIN_INPUT_PATH = \"~/Projects/competitions/titanic/data/train.csv\"\nTEST_INPUT_PATH = \"~/Projects/competitions/titanic/data/test.csv\"\nOUTPUT_PATH = \"output/\"\nTRAIN_VAL_SPLIT = 0.75\nRANDOM_SEED = 42\nMODEL = \"LOGISTIC_REGRESSION\"\nLOG_PATH = \"logs/\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
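Constants like these are meant to be consumed by a training script. A hedged sketch of how TRAIN_VAL_SPLIT and RANDOM_SEED would typically be used, assuming the module is importable as `config` and the project uses pandas/scikit-learn (neither is shown in the record):

import pandas as pd
from sklearn.model_selection import train_test_split

import config  # hypothetical name for the module holding the constants above

df = pd.read_csv(config.TRAIN_INPUT_PATH)
train_df, val_df = train_test_split(
    df, train_size=config.TRAIN_VAL_SPLIT, random_state=config.RANDOM_SEED)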
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def temp_context() ->Generator[Path, None, None]:
origin = Path().absolute()
try:
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True
) as tempdir:
os.chdir(tempdir)
yield origin
finally:
os.chdir(origin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Generator
@contextmanager
def temp_context() ->Generator[Path, None, None]:
origin = Path().absolute()
try:
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True
) as tempdir:
os.chdir(tempdir)
yield origin
finally:
os.chdir(origin)
<|reserved_special_token_1|>
"""
opsi-utils
Test utilities
"""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Generator
@contextmanager
def temp_context() -> Generator[Path, None, None]:
origin = Path().absolute()
try:
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tempdir:
os.chdir(tempdir)
yield origin # return original path
finally:
os.chdir(origin)
|
flexible
|
{
"blob_id": "3c2a611fd001f145703853f5ecfe70d0e93844e4",
"index": 4665,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@contextmanager\ndef temp_context() ->Generator[Path, None, None]:\n origin = Path().absolute()\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True\n ) as tempdir:\n os.chdir(tempdir)\n yield origin\n finally:\n os.chdir(origin)\n",
"step-3": "<mask token>\nimport os\nimport tempfile\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Generator\n\n\n@contextmanager\ndef temp_context() ->Generator[Path, None, None]:\n origin = Path().absolute()\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True\n ) as tempdir:\n os.chdir(tempdir)\n yield origin\n finally:\n os.chdir(origin)\n",
"step-4": "\"\"\"\nopsi-utils\n\nTest utilities\n\"\"\"\n\nimport os\nimport tempfile\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Generator\n\n\n@contextmanager\ndef temp_context() -> Generator[Path, None, None]:\n\torigin = Path().absolute()\n\ttry:\n\t\twith tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tempdir:\n\t\t\tos.chdir(tempdir)\n\t\t\tyield origin # return original path\n\tfinally:\n\t\tos.chdir(origin)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
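A short usage example for temp_context: everything inside the with block runs in a throwaway directory, and the yielded path lets the block reach back to the original working directory (note that `ignore_cleanup_errors` requires Python 3.10+).

from pathlib import Path

with temp_context() as origin:
    Path('scratch.txt').write_text('only exists in the temp dir')
    print('started in', origin)
# the temp dir is deleted and the cwd restored here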